1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Header file describing the internal (inter-module) DHD interfaces.
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Provides type definitions and function prototypes used to link the
5*4882a593Smuzhiyun  * DHD OS, bus, and protocol modules.
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Copyright (C) 2020, Broadcom.
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  *      Unless you and Broadcom execute a separate written software license
10*4882a593Smuzhiyun  * agreement governing use of this software, this software is licensed to you
11*4882a593Smuzhiyun  * under the terms of the GNU General Public License version 2 (the "GPL"),
12*4882a593Smuzhiyun  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
13*4882a593Smuzhiyun  * following added to such license:
14*4882a593Smuzhiyun  *
15*4882a593Smuzhiyun  *      As a special exception, the copyright holders of this software give you
16*4882a593Smuzhiyun  * permission to link this software with independent modules, and to copy and
17*4882a593Smuzhiyun  * distribute the resulting executable under terms of your choice, provided that
18*4882a593Smuzhiyun  * you also meet, for each linked independent module, the terms and conditions of
19*4882a593Smuzhiyun  * the license of that module.  An independent module is a module which is not
20*4882a593Smuzhiyun  * derived from this software.  The special exception does not apply to any
21*4882a593Smuzhiyun  * modifications of the software.
22*4882a593Smuzhiyun  *
23*4882a593Smuzhiyun  *
24*4882a593Smuzhiyun  * <<Broadcom-WL-IPTag/Open:>>
25*4882a593Smuzhiyun  *
26*4882a593Smuzhiyun  * $Id$
27*4882a593Smuzhiyun  */
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun /****************
30*4882a593Smuzhiyun  * Common types *
31*4882a593Smuzhiyun  */
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun #ifndef _dhd_h_
34*4882a593Smuzhiyun #define _dhd_h_
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun #if defined(LINUX)
37*4882a593Smuzhiyun #include <linux/firmware.h>
38*4882a593Smuzhiyun #include <linux/init.h>
39*4882a593Smuzhiyun #include <linux/kernel.h>
40*4882a593Smuzhiyun #include <linux/slab.h>
41*4882a593Smuzhiyun #include <linux/skbuff.h>
42*4882a593Smuzhiyun #include <linux/netdevice.h>
43*4882a593Smuzhiyun #include <linux/etherdevice.h>
44*4882a593Smuzhiyun #include <linux/random.h>
45*4882a593Smuzhiyun #include <linux/spinlock.h>
46*4882a593Smuzhiyun #include <linux/ethtool.h>
47*4882a593Smuzhiyun #include <linux/proc_fs.h>
48*4882a593Smuzhiyun #include <asm/uaccess.h>
49*4882a593Smuzhiyun #include <asm/unaligned.h>
50*4882a593Smuzhiyun #include <linux/fs.h>
51*4882a593Smuzhiyun #include <linux/namei.h>
52*4882a593Smuzhiyun #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
53*4882a593Smuzhiyun #include <uapi/linux/sched/types.h>
54*4882a593Smuzhiyun #elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
55*4882a593Smuzhiyun #include <linux/sched/types.h>
56*4882a593Smuzhiyun #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) */
57*4882a593Smuzhiyun #ifdef DHD_BUZZZ_LOG_ENABLED
58*4882a593Smuzhiyun #include <dhd_buzzz.h>
59*4882a593Smuzhiyun #endif /* DHD_BUZZZ_LOG_ENABLED */
60*4882a593Smuzhiyun /* The kernel threading is sdio-specific */
61*4882a593Smuzhiyun struct task_struct;
62*4882a593Smuzhiyun struct sched_param;
63*4882a593Smuzhiyun #if defined(BT_OVER_SDIO)
64*4882a593Smuzhiyun #include <dhd_bt_interface.h>
65*4882a593Smuzhiyun #endif /* defined (BT_OVER_SDIO) */
66*4882a593Smuzhiyun int setScheduler(struct task_struct *p, int policy, struct sched_param *param);
67*4882a593Smuzhiyun int get_scheduler_policy(struct task_struct *p);
68*4882a593Smuzhiyun #else /* LINUX */
69*4882a593Smuzhiyun #define ENOMEM		1
70*4882a593Smuzhiyun #define EFAULT      2
71*4882a593Smuzhiyun #define EINVAL		3
72*4882a593Smuzhiyun #define EIO			4
73*4882a593Smuzhiyun #define ETIMEDOUT	5
74*4882a593Smuzhiyun #define ENODATA 6
75*4882a593Smuzhiyun #define EREMOTEIO   7
76*4882a593Smuzhiyun #define ENODEV      8
77*4882a593Smuzhiyun #define ERESTARTSYS 512
78*4882a593Smuzhiyun #endif /* LINUX */
79*4882a593Smuzhiyun #define MAX_EVENT	16
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun #define ALL_INTERFACES	0xff
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun /* H2D and D2H ring dump is enabled by default */
84*4882a593Smuzhiyun #ifdef PCIE_FULL_DONGLE
85*4882a593Smuzhiyun #define DHD_DUMP_PCIE_RINGS
86*4882a593Smuzhiyun #endif /* PCIE_FULL_DONGLE */
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun #include <osl.h>
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun #include <wlioctl.h>
91*4882a593Smuzhiyun #include <dhdioctl.h>
92*4882a593Smuzhiyun #include <wlfc_proto.h>
93*4882a593Smuzhiyun #include <hnd_armtrap.h>
94*4882a593Smuzhiyun #if defined(DUMP_IOCTL_IOV_LIST) || defined(DHD_DEBUG)
95*4882a593Smuzhiyun #include <bcmutils.h>
96*4882a593Smuzhiyun #endif /* DUMP_IOCTL_IOV_LIST || DHD_DEBUG */
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun #if defined(BCMWDF)
99*4882a593Smuzhiyun #include <wdf.h>
100*4882a593Smuzhiyun #include <WdfMiniport.h>
101*4882a593Smuzhiyun #endif /* (BCMWDF)  */
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
104*4882a593Smuzhiyun #include <dnglioctl.h>
105*4882a593Smuzhiyun #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun #ifdef DHD_ERPOM
108*4882a593Smuzhiyun #include <pom.h>
109*4882a593Smuzhiyun #ifdef PCIE_OOB
110*4882a593Smuzhiyun /*
111*4882a593Smuzhiyun  * Both ERPOM and PCIE_OOB depend on FTDI to program the GPIOs.
112*4882a593Smuzhiyun  * Running both features in parallel makes the GPIOs go out of sync,
113*4882a593Smuzhiyun  * so only one of the two features is expected to be present at a time.
114*4882a593Smuzhiyun  */
115*4882a593Smuzhiyun #error "PCIE_OOB enabled"
116*4882a593Smuzhiyun #endif /* PCIE_OOB */
117*4882a593Smuzhiyun #endif /* DHD_ERPOM */
118*4882a593Smuzhiyun 
119*4882a593Smuzhiyun #include <dngl_stats.h>
120*4882a593Smuzhiyun #include <hnd_pktq.h>
121*4882a593Smuzhiyun 
122*4882a593Smuzhiyun #ifdef DEBUG_DPC_THREAD_WATCHDOG
123*4882a593Smuzhiyun #define MAX_RESCHED_CNT 600
124*4882a593Smuzhiyun #endif /* DEBUG_DPC_THREAD_WATCHDOG */
125*4882a593Smuzhiyun 
126*4882a593Smuzhiyun #if defined(LINUX) || defined(linux)
127*4882a593Smuzhiyun #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) && LINUX_VERSION_CODE < \
128*4882a593Smuzhiyun 	KERNEL_VERSION(3, 18, 0) || defined(CONFIG_BCMDHD_VENDOR_EXT))
129*4882a593Smuzhiyun #define WL_VENDOR_EXT_SUPPORT
130*4882a593Smuzhiyun #endif /* 3.18 > KERNEL_VER >= 3.14 || defined(CONFIG_BCMDHD_VENDOR_EXT) */
131*4882a593Smuzhiyun #endif /* defined (LINUX) || defined(linux) */
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun #if defined(KEEP_ALIVE)
134*4882a593Smuzhiyun /* Default KEEP_ALIVE period is 55 sec, to prevent the AP from sending keep-alive probe frames */
135*4882a593Smuzhiyun #define KEEP_ALIVE_PERIOD 55000
136*4882a593Smuzhiyun #define NULL_PKT_STR	"null_pkt"
137*4882a593Smuzhiyun #endif /* KEEP_ALIVE */
138*4882a593Smuzhiyun 
139*4882a593Smuzhiyun /* Enabled by default from here; the workqueue (WQ) based code will be removed later */
140*4882a593Smuzhiyun #define DHD_USE_KTHREAD_FOR_LOGTRACE
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun /* Forward decls */
143*4882a593Smuzhiyun struct dhd_bus;
144*4882a593Smuzhiyun struct dhd_prot;
145*4882a593Smuzhiyun struct dhd_info;
146*4882a593Smuzhiyun struct dhd_ioctl;
147*4882a593Smuzhiyun struct dhd_dbg;
148*4882a593Smuzhiyun struct dhd_ts;
149*4882a593Smuzhiyun #ifdef DNGL_AXI_ERROR_LOGGING
150*4882a593Smuzhiyun struct dhd_axi_error_dump;
151*4882a593Smuzhiyun #endif /* DNGL_AXI_ERROR_LOGGING */
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun /* The level of bus communication with the dongle */
154*4882a593Smuzhiyun enum dhd_bus_state {
155*4882a593Smuzhiyun 	DHD_BUS_DOWN,		/* Not ready for frame transfers */
156*4882a593Smuzhiyun 	DHD_BUS_LOAD,		/* Download access only (CPU reset) */
157*4882a593Smuzhiyun 	DHD_BUS_DATA,		/* Ready for frame transfers */
158*4882a593Smuzhiyun 	DHD_BUS_SUSPEND,	/* Bus has been suspended */
159*4882a593Smuzhiyun 	DHD_BUS_DOWN_IN_PROGRESS,	/* Bus going Down */
160*4882a593Smuzhiyun 	DHD_BUS_REMOVE,	/* Bus has been removed */
161*4882a593Smuzhiyun };
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun /* Types of bus device reset (devreset) requests */
164*4882a593Smuzhiyun enum dhd_bus_devreset_type {
165*4882a593Smuzhiyun 	DHD_BUS_DEVRESET_ON = 0,	/* ON */
166*4882a593Smuzhiyun 	DHD_BUS_DEVRESET_OFF = 1,		/* OFF */
167*4882a593Smuzhiyun 	DHD_BUS_DEVRESET_FLR = 2,		/* FLR */
168*4882a593Smuzhiyun 	DHD_BUS_DEVRESET_FLR_FORCE_FAIL = 3,	/* FLR FORCE FAIL */
169*4882a593Smuzhiyun 	DHD_BUS_DEVRESET_QUIESCE = 4,		/* QUIESCE */
170*4882a593Smuzhiyun };
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun /*
173*4882a593Smuzhiyun  * Bit fields indicating clean-up processes that must be waited on until they finish.
174*4882a593Smuzhiyun  * Future synchronizable processes can add their bit field below and update
175*4882a593Smuzhiyun  * their functionality accordingly.
176*4882a593Smuzhiyun  */
177*4882a593Smuzhiyun #define DHD_BUS_BUSY_IN_TX                   0x01
178*4882a593Smuzhiyun #define DHD_BUS_BUSY_IN_SEND_PKT             0x02
179*4882a593Smuzhiyun #define DHD_BUS_BUSY_IN_DPC                  0x04
180*4882a593Smuzhiyun #define DHD_BUS_BUSY_IN_WD                   0x08
181*4882a593Smuzhiyun #define DHD_BUS_BUSY_IN_IOVAR                0x10
182*4882a593Smuzhiyun #define DHD_BUS_BUSY_IN_DHD_IOVAR            0x20
183*4882a593Smuzhiyun #define DHD_BUS_BUSY_SUSPEND_IN_PROGRESS     0x40
184*4882a593Smuzhiyun #define DHD_BUS_BUSY_RESUME_IN_PROGRESS      0x80
185*4882a593Smuzhiyun #define DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS 0x100
186*4882a593Smuzhiyun #define DHD_BUS_BUSY_RPM_SUSPEND_DONE        0x200
187*4882a593Smuzhiyun #define DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS  0x400
188*4882a593Smuzhiyun #define DHD_BUS_BUSY_RPM_ALL                 (DHD_BUS_BUSY_RPM_SUSPEND_DONE | \
189*4882a593Smuzhiyun 		DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS | \
190*4882a593Smuzhiyun 		DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS)
191*4882a593Smuzhiyun #define DHD_BUS_BUSY_IN_CHECKDIED		0x800
192*4882a593Smuzhiyun #define DHD_BUS_BUSY_IN_MEMDUMP			0x1000
193*4882a593Smuzhiyun #define DHD_BUS_BUSY_IN_SSSRDUMP		0x2000
194*4882a593Smuzhiyun #define DHD_BUS_BUSY_IN_LOGDUMP			0x4000
195*4882a593Smuzhiyun #define DHD_BUS_BUSY_IN_HALDUMP			0x8000
196*4882a593Smuzhiyun #define DHD_BUS_BUSY_IN_NAPI			0x10000
197*4882a593Smuzhiyun #define DHD_BUS_BUSY_IN_DS_DEASSERT		0x20000
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun #define DHD_BUS_BUSY_SET_IN_TX(dhdp) \
200*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_TX
201*4882a593Smuzhiyun #define DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp) \
202*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SEND_PKT
203*4882a593Smuzhiyun #define DHD_BUS_BUSY_SET_IN_DPC(dhdp) \
204*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DPC
205*4882a593Smuzhiyun #define DHD_BUS_BUSY_SET_IN_WD(dhdp) \
206*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_WD
207*4882a593Smuzhiyun #define DHD_BUS_BUSY_SET_IN_IOVAR(dhdp) \
208*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_IOVAR
209*4882a593Smuzhiyun #define DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhdp) \
210*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DHD_IOVAR
211*4882a593Smuzhiyun #define DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(dhdp) \
212*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_SUSPEND_IN_PROGRESS
213*4882a593Smuzhiyun #define DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(dhdp) \
214*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RESUME_IN_PROGRESS
215*4882a593Smuzhiyun #define DHD_BUS_BUSY_SET_RPM_SUSPEND_IN_PROGRESS(dhdp) \
216*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS
217*4882a593Smuzhiyun #define DHD_BUS_BUSY_SET_RPM_SUSPEND_DONE(dhdp) \
218*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_SUSPEND_DONE
219*4882a593Smuzhiyun #define DHD_BUS_BUSY_SET_RPM_RESUME_IN_PROGRESS(dhdp) \
220*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS
221*4882a593Smuzhiyun #define DHD_BUS_BUSY_SET_IN_CHECKDIED(dhdp) \
222*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_CHECKDIED
223*4882a593Smuzhiyun #define DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp) \
224*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_MEMDUMP
225*4882a593Smuzhiyun #define DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp) \
226*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_SSSRDUMP
227*4882a593Smuzhiyun #define DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp) \
228*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_LOGDUMP
229*4882a593Smuzhiyun #define DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp) \
230*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_HALDUMP
231*4882a593Smuzhiyun #define DHD_BUS_BUSY_SET_IN_NAPI(dhdp) \
232*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_NAPI
233*4882a593Smuzhiyun #define DHD_BUS_BUSY_SET_IN_DS_DEASSERT(dhdp) \
234*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state |= DHD_BUS_BUSY_IN_DS_DEASSERT
235*4882a593Smuzhiyun 
236*4882a593Smuzhiyun #define DHD_BUS_BUSY_CLEAR_IN_TX(dhdp) \
237*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_TX
238*4882a593Smuzhiyun #define DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp) \
239*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SEND_PKT
240*4882a593Smuzhiyun #define DHD_BUS_BUSY_CLEAR_IN_DPC(dhdp) \
241*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DPC
242*4882a593Smuzhiyun #define DHD_BUS_BUSY_CLEAR_IN_WD(dhdp) \
243*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_WD
244*4882a593Smuzhiyun #define DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhdp) \
245*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_IOVAR
246*4882a593Smuzhiyun #define DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhdp) \
247*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DHD_IOVAR
248*4882a593Smuzhiyun #define DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(dhdp) \
249*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_SUSPEND_IN_PROGRESS
250*4882a593Smuzhiyun #define DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(dhdp) \
251*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RESUME_IN_PROGRESS
252*4882a593Smuzhiyun #define DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhdp) \
253*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS
254*4882a593Smuzhiyun #define DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_DONE(dhdp) \
255*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_SUSPEND_DONE
256*4882a593Smuzhiyun #define DHD_BUS_BUSY_CLEAR_RPM_RESUME_IN_PROGRESS(dhdp) \
257*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS
258*4882a593Smuzhiyun #define DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(dhdp) \
259*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_CHECKDIED
260*4882a593Smuzhiyun #define DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(dhdp) \
261*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_MEMDUMP
262*4882a593Smuzhiyun #define DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp) \
263*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_SSSRDUMP
264*4882a593Smuzhiyun #define DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp) \
265*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_LOGDUMP
266*4882a593Smuzhiyun #define DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp) \
267*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_HALDUMP
268*4882a593Smuzhiyun #define DHD_BUS_BUSY_CLEAR_IN_NAPI(dhdp) \
269*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_NAPI
270*4882a593Smuzhiyun #define DHD_BUS_BUSY_CLEAR_IN_DS_DEASSERT(dhdp) \
271*4882a593Smuzhiyun 	(dhdp)->dhd_bus_busy_state &= ~DHD_BUS_BUSY_IN_DS_DEASSERT
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun #define DHD_BUS_BUSY_CHECK_IN_TX(dhdp) \
274*4882a593Smuzhiyun 	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_TX)
275*4882a593Smuzhiyun #define DHD_BUS_BUSY_CHECK_IN_SEND_PKT(dhdp) \
276*4882a593Smuzhiyun 	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_SEND_PKT)
277*4882a593Smuzhiyun #define DHD_BUS_BUSY_CHECK_IN_DPC(dhdp) \
278*4882a593Smuzhiyun 	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_DPC)
279*4882a593Smuzhiyun #define DHD_BUS_BUSY_CHECK_IN_WD(dhdp) \
280*4882a593Smuzhiyun 	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_WD)
281*4882a593Smuzhiyun #define DHD_BUS_BUSY_CHECK_IN_IOVAR(dhdp) \
282*4882a593Smuzhiyun 	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_IOVAR)
283*4882a593Smuzhiyun #define DHD_BUS_BUSY_CHECK_IN_DHD_IOVAR(dhdp) \
284*4882a593Smuzhiyun 	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_DHD_IOVAR)
285*4882a593Smuzhiyun #define DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp) \
286*4882a593Smuzhiyun 	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_SUSPEND_IN_PROGRESS)
287*4882a593Smuzhiyun #define DHD_BUS_BUSY_CHECK_RESUME_IN_PROGRESS(dhdp) \
288*4882a593Smuzhiyun 	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RESUME_IN_PROGRESS)
289*4882a593Smuzhiyun #define DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(dhdp) \
290*4882a593Smuzhiyun 	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_IN_PROGRESS)
291*4882a593Smuzhiyun #define DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(dhdp) \
292*4882a593Smuzhiyun 	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_SUSPEND_DONE)
293*4882a593Smuzhiyun #define DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(dhdp) \
294*4882a593Smuzhiyun 	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_RESUME_IN_PROGRESS)
295*4882a593Smuzhiyun #define DHD_BUS_BUSY_CHECK_RPM_ALL(dhdp) \
296*4882a593Smuzhiyun 	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_RPM_ALL)
297*4882a593Smuzhiyun #define DHD_BUS_BUSY_CHECK_IN_CHECKDIED(dhdp) \
298*4882a593Smuzhiyun 	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_CHECKDIED)
299*4882a593Smuzhiyun #define DHD_BUS_BUSY_CHECK_IN_MEMDUMP(dhdp) \
300*4882a593Smuzhiyun 	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_MEMDUMP)
301*4882a593Smuzhiyun #define DHD_BUS_BUSY_CHECK_IN_SSSRDUMP(dhdp) \
302*4882a593Smuzhiyun 	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_SSSRDUMP)
303*4882a593Smuzhiyun #define DHD_BUS_BUSY_CHECK_IN_LOGDUMP(dhdp) \
304*4882a593Smuzhiyun 	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_LOGDUMP)
305*4882a593Smuzhiyun #define DHD_BUS_BUSY_CHECK_IN_HALDUMP(dhdp) \
306*4882a593Smuzhiyun 	((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP)
307*4882a593Smuzhiyun #define DHD_BUS_BUSY_CHECK_IN_DS_DEASSERT(dhdp) \
308*4882a593Smuzhiyun 		((dhdp)->dhd_bus_busy_state & DHD_BUS_BUSY_IN_DS_DEASSERT)
309*4882a593Smuzhiyun #define DHD_BUS_BUSY_CHECK_IDLE(dhdp) \
310*4882a593Smuzhiyun 	((dhdp)->dhd_bus_busy_state == 0)
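
/*
 * Usage sketch (illustrative only, not part of the driver): the busy-state
 * bits above are normally set and cleared under the DHD general lock, and
 * waiters are woken after a bit is cleared, e.g. in a memdump-style path:
 *
 *	unsigned long flags;
 *	DHD_GENERAL_LOCK(dhdp, flags);
 *	DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp);
 *	DHD_GENERAL_UNLOCK(dhdp, flags);
 *	... collect the dump ...
 *	DHD_GENERAL_LOCK(dhdp, flags);
 *	DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(dhdp);
 *	dhd_os_busbusy_wake(dhdp);
 *	DHD_GENERAL_UNLOCK(dhdp, flags);
 *
 * DHD_GENERAL_LOCK/UNLOCK and dhd_os_busbusy_wake() are assumed here; they
 * are declared elsewhere in this driver.
 */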
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun #define DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp) \
313*4882a593Smuzhiyun 	((dhdp)->busstate == DHD_BUS_SUSPEND || DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp))
314*4882a593Smuzhiyun 
315*4882a593Smuzhiyun #define DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(dhdp) \
316*4882a593Smuzhiyun 		(DHD_BUS_BUSY_CHECK_SUSPEND_IN_PROGRESS(dhdp) || \
317*4882a593Smuzhiyun 		 DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(dhdp))
318*4882a593Smuzhiyun 
319*4882a593Smuzhiyun #define DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp) \
320*4882a593Smuzhiyun 	((dhdp)->busstate == DHD_BUS_SUSPEND || DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(dhdp))
321*4882a593Smuzhiyun 
322*4882a593Smuzhiyun #define DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp) \
323*4882a593Smuzhiyun 		((dhdp)->busstate == DHD_BUS_DOWN || (dhdp)->busstate == DHD_BUS_DOWN_IN_PROGRESS || \
324*4882a593Smuzhiyun 		(dhdp)->busstate == DHD_BUS_REMOVE)
325*4882a593Smuzhiyun 
326*4882a593Smuzhiyun #define DHD_BUS_CHECK_REMOVE(dhdp) \
327*4882a593Smuzhiyun 		((dhdp)->busstate == DHD_BUS_REMOVE)
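
/*
 * Usage sketch (illustrative only): IOCTL and TX paths typically bail out
 * early when the bus is not usable, e.g.:
 *
 *	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
 *		return BCME_NOTUP;	// or another appropriate error code
 *	}
 */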
328*4882a593Smuzhiyun 
329*4882a593Smuzhiyun /* IOVar flags for common error checks */
330*4882a593Smuzhiyun #define DHD_IOVF_PWRREQ_BYPASS	(1<<0) /* flags to prevent bp access during host sleep state */
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun #define MAX_MTU_SZ (1600u)
333*4882a593Smuzhiyun 
334*4882a593Smuzhiyun #ifdef PCIE_INB_DW
335*4882a593Smuzhiyun #define DHD_CHECK_CFG_IN_PROGRESS(dhdp) \
336*4882a593Smuzhiyun 	((INBAND_DW_ENAB((dhdp)->bus)) ? dhd_check_cfg_in_progress(dhdp) : FALSE)
337*4882a593Smuzhiyun #else
338*4882a593Smuzhiyun #define DHD_CHECK_CFG_IN_PROGRESS(dhdp) FALSE
339*4882a593Smuzhiyun #endif /* PCIE_INB_DW */
340*4882a593Smuzhiyun 
341*4882a593Smuzhiyun #ifndef USEC_PER_SEC
342*4882a593Smuzhiyun #define USEC_PER_SEC (1000 * 1000)
343*4882a593Smuzhiyun #endif
344*4882a593Smuzhiyun #if (defined (LINUX) || defined(linux))
345*4882a593Smuzhiyun /* (u64)result = (u64)dividend / (u64)divisor */
346*4882a593Smuzhiyun #define DIV_U64_BY_U64(dividend, divisor)	div64_u64(dividend, divisor)
347*4882a593Smuzhiyun 
348*4882a593Smuzhiyun /* (u64)result = (u64)dividend / (u32)divisor */
349*4882a593Smuzhiyun #define DIV_U64_BY_U32(dividend, divisor)	div_u64(dividend, divisor)
350*4882a593Smuzhiyun 
351*4882a593Smuzhiyun /* Be careful while using this, as it also modifies the dividend:
352*4882a593Smuzhiyun  * (u32)remainder = (u64)dividend % (u32)divisor
353*4882a593Smuzhiyun  * (u64)dividend = (u64)dividend / (u32)divisor
354*4882a593Smuzhiyun  */
355*4882a593Smuzhiyun #define DIV_AND_MOD_U64_BY_U32(dividend, divisor)	do_div(dividend, divisor)
356*4882a593Smuzhiyun 
357*4882a593Smuzhiyun /* (u32)remainder = (u64)dividend % (u32)divisor */
358*4882a593Smuzhiyun #define MOD_U64_BY_U32(dividend, divisor) ({				\
359*4882a593Smuzhiyun 	uint64 temp_dividend = (dividend);				\
360*4882a593Smuzhiyun 	uint32 rem = DIV_AND_MOD_U64_BY_U32(temp_dividend, (divisor));	\
361*4882a593Smuzhiyun 	rem;								\
362*4882a593Smuzhiyun })
363*4882a593Smuzhiyun 
364*4882a593Smuzhiyun #define SEC_USEC_FMT \
365*4882a593Smuzhiyun 	"%5llu.%06u"
366*4882a593Smuzhiyun #else
367*4882a593Smuzhiyun /* (u64)result = (u64)dividend / (u64)divisor */
368*4882a593Smuzhiyun #define DIV_U64_BY_U64(dividend, divisor)	(uint64)(dividend) / (uint64)(divisor)
369*4882a593Smuzhiyun 
370*4882a593Smuzhiyun /* (u64)result = (u64)dividend / (u32)divisor */
371*4882a593Smuzhiyun #define DIV_U64_BY_U32(dividend, divisor)	(uint64)(dividend) / (uint32)(divisor)
372*4882a593Smuzhiyun 
373*4882a593Smuzhiyun /* Be careful while using this, as it also modifies the dividend:
374*4882a593Smuzhiyun  * (u32)remainder = (u64)dividend % (u32)divisor
375*4882a593Smuzhiyun  * (u64)dividend = (u64)dividend / (u32)divisor
376*4882a593Smuzhiyun  */
377*4882a593Smuzhiyun #define DIV_AND_MOD_U64_BY_U32(dividend, divisor) ({		\
378*4882a593Smuzhiyun 	uint32 rem = (uint64)(dividend) % (uint32)(divisor);	\
379*4882a593Smuzhiyun 	(dividend) = (uint64)(dividend) / (uint32)(divisor);	\
380*4882a593Smuzhiyun 	rem;							\
381*4882a593Smuzhiyun })
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun /* (u32)remainder = (u64)dividend % (u32)divisor */
384*4882a593Smuzhiyun #define MOD_U64_BY_U32(dividend, divisor)	(uint32)((uint64)(dividend) % (uint32)(divisor))
385*4882a593Smuzhiyun 
386*4882a593Smuzhiyun #define SEC_USEC_FMT \
387*4882a593Smuzhiyun 	"%015llu.%06u"
388*4882a593Smuzhiyun #endif /* LINUX || linux */
389*4882a593Smuzhiyun 
390*4882a593Smuzhiyun /* t: time in nanoseconds */
391*4882a593Smuzhiyun #define GET_SEC_USEC(t) \
392*4882a593Smuzhiyun 	DIV_U64_BY_U32(t, NSEC_PER_SEC), \
393*4882a593Smuzhiyun 	((uint32)(MOD_U64_BY_U32(t, NSEC_PER_SEC) / (uint32)NSEC_PER_USEC))
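
/*
 * Usage sketch (illustrative only): GET_SEC_USEC() expands to two
 * comma-separated values, so it supplies both arguments expected by
 * SEC_USEC_FMT when printing a nanosecond timestamp, e.g.:
 *
 *	uint64 ts_ns = OSL_LOCALTIME_NS();
 *	DHD_ERROR(("timestamp " SEC_USEC_FMT "\n", GET_SEC_USEC(ts_ns)));
 *
 * OSL_LOCALTIME_NS() and DHD_ERROR() are assumed to be available from the
 * OSL and debug headers; any nanosecond uint64 value works the same way.
 */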
394*4882a593Smuzhiyun 
395*4882a593Smuzhiyun /* Download Types */
396*4882a593Smuzhiyun typedef enum download_type {
397*4882a593Smuzhiyun 	FW,
398*4882a593Smuzhiyun 	NVRAM,
399*4882a593Smuzhiyun 	CLM_BLOB,
400*4882a593Smuzhiyun 	TXCAP_BLOB
401*4882a593Smuzhiyun } download_type_t;
402*4882a593Smuzhiyun 
403*4882a593Smuzhiyun #if defined(NDIS)
404*4882a593Smuzhiyun /* Firmware requested operation mode */
405*4882a593Smuzhiyun #define STA_MASK			0x0001
406*4882a593Smuzhiyun #define HOSTAPD_MASK		0x0002
407*4882a593Smuzhiyun #define WFD_MASK			0x0004
408*4882a593Smuzhiyun #define SOFTAP_FW_MASK	0x0008
409*4882a593Smuzhiyun #define P2P_GO_ENABLED		0x0010
410*4882a593Smuzhiyun #define P2P_GC_ENABLED		0x0020
411*4882a593Smuzhiyun #define CONCURENT_MASK		0x00F0
412*4882a593Smuzhiyun #endif /* #if defined(NDIS) */
413*4882a593Smuzhiyun 
414*4882a593Smuzhiyun /* For supporting multiple interfaces */
415*4882a593Smuzhiyun #define DHD_MAX_IFS			16
416*4882a593Smuzhiyun #ifndef DHD_MAX_STATIC_IFS
417*4882a593Smuzhiyun #define DHD_MAX_STATIC_IFS	1
418*4882a593Smuzhiyun #endif
419*4882a593Smuzhiyun #define DHD_DEL_IF			-0xE
420*4882a593Smuzhiyun #define DHD_BAD_IF			-0xF
421*4882a593Smuzhiyun #define DHD_DUMMY_INFO_IF	0xDEAF	/* Hack i/f to handle events from INFO Ring */
422*4882a593Smuzhiyun /* XXX to avoid a build error for NDIS for the time being */
423*4882a593Smuzhiyun #define DHD_EVENT_IF DHD_DUMMY_INFO_IF
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun #if defined(LINUX) || defined(linux)
426*4882a593Smuzhiyun enum dhd_op_flags {
427*4882a593Smuzhiyun /* Firmware requested operation mode */
428*4882a593Smuzhiyun 	DHD_FLAG_STA_MODE				= (1 << (0)), /* STA only */
429*4882a593Smuzhiyun 	DHD_FLAG_HOSTAP_MODE				= (1 << (1)), /* SOFTAP only */
430*4882a593Smuzhiyun 	DHD_FLAG_P2P_MODE				= (1 << (2)), /* P2P Only */
431*4882a593Smuzhiyun 	/* STA + P2P */
432*4882a593Smuzhiyun 	DHD_FLAG_CONCURR_SINGLE_CHAN_MODE = (DHD_FLAG_STA_MODE | DHD_FLAG_P2P_MODE),
433*4882a593Smuzhiyun 	/* STA + SoftAP */
434*4882a593Smuzhiyun 	DHD_FLAG_CONCURR_STA_HOSTAP_MODE = (DHD_FLAG_STA_MODE | DHD_FLAG_HOSTAP_MODE),
435*4882a593Smuzhiyun 	/* XXX MULTI_CHAN mode is meaningful only in concurrency mode */
436*4882a593Smuzhiyun 	DHD_FLAG_CONCURR_MULTI_CHAN_MODE		= (1 << (4)), /* STA + P2P */
437*4882a593Smuzhiyun 	/* Current P2P mode for P2P connection */
438*4882a593Smuzhiyun 	DHD_FLAG_P2P_GC_MODE				= (1 << (5)),
439*4882a593Smuzhiyun 	DHD_FLAG_P2P_GO_MODE				= (1 << (6)),
440*4882a593Smuzhiyun 	DHD_FLAG_MBSS_MODE				= (1 << (7)), /* MBSS in future */
441*4882a593Smuzhiyun 	DHD_FLAG_IBSS_MODE				= (1 << (8)),
442*4882a593Smuzhiyun 	DHD_FLAG_MFG_MODE				= (1 << (9)),
443*4882a593Smuzhiyun 	DHD_FLAG_RSDB_MODE				= (1 << (10)),
444*4882a593Smuzhiyun 	DHD_FLAG_MP2P_MODE				= (1 << (11))
445*4882a593Smuzhiyun };
446*4882a593Smuzhiyun #endif /* defined (LINUX) || defined(linux) */
447*4882a593Smuzhiyun 
448*4882a593Smuzhiyun #if defined(BCMDONGLEHOST)
449*4882a593Smuzhiyun #define DHD_OPMODE_SUPPORTED(dhd, opmode_flag) \
450*4882a593Smuzhiyun 	(dhd ? ((((dhd_pub_t *)dhd)->op_mode)  &  opmode_flag) : -1)
451*4882a593Smuzhiyun #define DHD_OPMODE_STA_SOFTAP_CONCURR(dhd) \
452*4882a593Smuzhiyun 	(dhd ? (((((dhd_pub_t *)dhd)->op_mode) & DHD_FLAG_CONCURR_STA_HOSTAP_MODE) == \
453*4882a593Smuzhiyun 	DHD_FLAG_CONCURR_STA_HOSTAP_MODE) : 0)
454*4882a593Smuzhiyun #else
455*4882a593Smuzhiyun #define DHD_OPMODE_SUPPORTED(dhd, opmode_flag)  -1
456*4882a593Smuzhiyun #define DHD_OPMODE_STA_SOFTAP_CONCURR(dhd)	0
457*4882a593Smuzhiyun #endif /* defined (BCMDONGLEHOST) */
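
/*
 * Usage sketch (illustrative only): checking the firmware operation mode
 * from a dhd_pub_t pointer; the surrounding code is hypothetical.
 *
 *	if (DHD_OPMODE_SUPPORTED(dhdp, DHD_FLAG_HOSTAP_MODE) > 0) {
 *		... SoftAP-specific handling ...
 *	}
 *	if (DHD_OPMODE_STA_SOFTAP_CONCURR(dhdp)) {
 *		... STA + SoftAP concurrency handling ...
 *	}
 */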
458*4882a593Smuzhiyun 
459*4882a593Smuzhiyun /* Max sequential TX/RX Control timeouts to set HANG event */
460*4882a593Smuzhiyun #ifndef MAX_CNTL_TX_TIMEOUT
461*4882a593Smuzhiyun #define MAX_CNTL_TX_TIMEOUT 2
462*4882a593Smuzhiyun #endif /* MAX_CNTL_TX_TIMEOUT */
463*4882a593Smuzhiyun #ifndef MAX_CNTL_RX_TIMEOUT
464*4882a593Smuzhiyun #define MAX_CNTL_RX_TIMEOUT 1
465*4882a593Smuzhiyun #endif /* MAX_CNTL_RX_TIMEOUT */
466*4882a593Smuzhiyun 
467*4882a593Smuzhiyun #define DHD_SCAN_ASSOC_ACTIVE_TIME	40 /* ms: Embedded default Active setting from DHD */
468*4882a593Smuzhiyun #ifndef CUSTOM_SCAN_UNASSOC_ACTIVE_TIME
469*4882a593Smuzhiyun #define DHD_SCAN_UNASSOC_ACTIVE_TIME	80 /* ms: Embedded def. Unassoc Active setting from DHD */
470*4882a593Smuzhiyun #else
471*4882a593Smuzhiyun #define DHD_SCAN_UNASSOC_ACTIVE_TIME	CUSTOM_SCAN_UNASSOC_ACTIVE_TIME
472*4882a593Smuzhiyun #endif /* CUSTOM_SCAN_UNASSOC_ACTIVE_TIME */
473*4882a593Smuzhiyun #define DHD_SCAN_HOME_TIME		45 /* ms: Embedded default Home time setting from DHD */
474*4882a593Smuzhiyun #define DHD_SCAN_HOME_AWAY_TIME	100 /* ms: Embedded default Home Away time setting from DHD */
475*4882a593Smuzhiyun #ifndef CUSTOM_SCAN_PASSIVE_TIME
476*4882a593Smuzhiyun #define DHD_SCAN_PASSIVE_TIME		130 /* ms: Embedded default Passive setting from DHD */
477*4882a593Smuzhiyun #else
478*4882a593Smuzhiyun #define DHD_SCAN_PASSIVE_TIME	CUSTOM_SCAN_PASSIVE_TIME /* ms: Custom Passive setting from DHD */
479*4882a593Smuzhiyun #endif /* CUSTOM_SCAN_PASSIVE_TIME */
480*4882a593Smuzhiyun 
481*4882a593Smuzhiyun #ifndef POWERUP_MAX_RETRY
482*4882a593Smuzhiyun #define POWERUP_MAX_RETRY	3 /* how many times we retry to power up the chip */
483*4882a593Smuzhiyun #endif
484*4882a593Smuzhiyun #ifndef POWERUP_WAIT_MS
485*4882a593Smuzhiyun #define POWERUP_WAIT_MS		2000 /* ms: timeout while waiting for WiFi to come up */
486*4882a593Smuzhiyun #endif
487*4882a593Smuzhiyun /*
488*4882a593Smuzhiyun  * MAX_NVRAMBUF_SIZE determines the size of the buffer in the DHD that holds
489*4882a593Smuzhiyun  * the NVRAM data, i.e. the size of the buffer pointed to by bus->vars.
490*4882a593Smuzhiyun  * It needs to be 24K to support NVRAM images larger than 16K.
491*4882a593Smuzhiyun  */
492*4882a593Smuzhiyun #define MAX_NVRAMBUF_SIZE	(24 * 1024) /* max nvram buf size */
493*4882a593Smuzhiyun #define MAX_CLM_BUF_SIZE	(48 * 1024) /* max clm blob size */
494*4882a593Smuzhiyun #define MAX_TXCAP_BUF_SIZE	(16 * 1024) /* max txcap blob size */
495*4882a593Smuzhiyun #ifdef DHD_DEBUG
496*4882a593Smuzhiyun #define DHD_JOIN_MAX_TIME_DEFAULT 10000 /* ms: Max time out for joining AP */
497*4882a593Smuzhiyun #define DHD_SCAN_DEF_TIMEOUT 10000 /* ms: Max time out for scan in progress */
498*4882a593Smuzhiyun #endif /* DHD_DEBUG */
499*4882a593Smuzhiyun 
500*4882a593Smuzhiyun #ifndef CONFIG_BCMDHD_CLM_PATH
501*4882a593Smuzhiyun #ifdef OEM_ANDROID
502*4882a593Smuzhiyun #if defined(CUSTOMER_HW4) && defined(PLATFORM_SLP)
503*4882a593Smuzhiyun #define CONFIG_BCMDHD_CLM_PATH "/lib/firmware/bcmdhd_clm.blob"
504*4882a593Smuzhiyun #else
505*4882a593Smuzhiyun #define CONFIG_BCMDHD_CLM_PATH "/etc/wifi/bcmdhd_clm.blob"
506*4882a593Smuzhiyun #endif /* CUSTOMER_HW4 && PLATFORM_SLP */
507*4882a593Smuzhiyun #elif defined(LINUX) || defined(linux)
508*4882a593Smuzhiyun #define CONFIG_BCMDHD_CLM_PATH "/var/run/bcmdhd_clm.blob"
509*4882a593Smuzhiyun #else
510*4882a593Smuzhiyun /* clm download will fail on empty path */
511*4882a593Smuzhiyun #define CONFIG_BCMDHD_CLM_PATH ""
512*4882a593Smuzhiyun #endif /* OEM_ANDROID */
513*4882a593Smuzhiyun #endif /* CONFIG_BCMDHD_CLM_PATH */
514*4882a593Smuzhiyun #define WL_CCODE_NULL_COUNTRY  "#n"
515*4882a593Smuzhiyun 
516*4882a593Smuzhiyun #ifdef DHD_EFI
517*4882a593Smuzhiyun #define FW_VER_STR_LEN	256
518*4882a593Smuzhiyun #else
519*4882a593Smuzhiyun #define FW_VER_STR_LEN	128
520*4882a593Smuzhiyun #endif
521*4882a593Smuzhiyun #define FWID_STR_LEN 256
522*4882a593Smuzhiyun #define CLM_VER_STR_LEN 128
523*4882a593Smuzhiyun #define BUS_API_REV_STR_LEN	128
524*4882a593Smuzhiyun #define FW_VER_STR "Version"
525*4882a593Smuzhiyun #define FWID_STR_1 "FWID: 01-"
526*4882a593Smuzhiyun #define FWID_STR_2 "FWID=01-"
527*4882a593Smuzhiyun extern char bus_api_revision[];
528*4882a593Smuzhiyun 
529*4882a593Smuzhiyun enum dhd_bus_wake_state {
530*4882a593Smuzhiyun 	WAKE_LOCK_OFF			= 0,
531*4882a593Smuzhiyun 	WAKE_LOCK_PRIV			= 1,
532*4882a593Smuzhiyun 	WAKE_LOCK_DPC			= 2,
533*4882a593Smuzhiyun 	WAKE_LOCK_IOCTL			= 3,
534*4882a593Smuzhiyun 	WAKE_LOCK_DOWNLOAD		= 4,
535*4882a593Smuzhiyun 	WAKE_LOCK_TMOUT			= 5,
536*4882a593Smuzhiyun 	WAKE_LOCK_WATCHDOG		= 6,
537*4882a593Smuzhiyun 	WAKE_LOCK_LINK_DOWN_TMOUT	= 7,
538*4882a593Smuzhiyun 	WAKE_LOCK_PNO_FIND_TMOUT	= 8,
539*4882a593Smuzhiyun 	WAKE_LOCK_SOFTAP_SET		= 9,
540*4882a593Smuzhiyun 	WAKE_LOCK_SOFTAP_STOP		= 10,
541*4882a593Smuzhiyun 	WAKE_LOCK_SOFTAP_START		= 11,
542*4882a593Smuzhiyun 	WAKE_LOCK_SOFTAP_THREAD		= 12
543*4882a593Smuzhiyun };
544*4882a593Smuzhiyun 
545*4882a593Smuzhiyun enum {
546*4882a593Smuzhiyun 	EVENT_BUF_POOL_LOW = 32,
547*4882a593Smuzhiyun 	EVENT_BUF_POOL_MEDIUM = 64,
548*4882a593Smuzhiyun 	EVENT_BUF_POOL_HIGH = 128,
549*4882a593Smuzhiyun 	EVENT_BUF_POOL_HIGHEST = 256
550*4882a593Smuzhiyun };
551*4882a593Smuzhiyun 
552*4882a593Smuzhiyun #ifdef PCIE_INB_DW
553*4882a593Smuzhiyun enum dhd_bus_ds_state {
554*4882a593Smuzhiyun 	DW_DEVICE_DS_INVALID		= -1,
555*4882a593Smuzhiyun 	DW_DEVICE_DS_DEV_SLEEP		= 0,
556*4882a593Smuzhiyun 	DW_DEVICE_DS_DEV_SLEEP_PEND	= 1,
557*4882a593Smuzhiyun 	DW_DEVICE_DS_DISABLED_WAIT	= 2,
558*4882a593Smuzhiyun 	DW_DEVICE_DS_DEV_WAKE		= 3,
559*4882a593Smuzhiyun 	DW_DEVICE_DS_ACTIVE		= 4,
560*4882a593Smuzhiyun 	DW_DEVICE_HOST_SLEEP_WAIT	= 5,
561*4882a593Smuzhiyun 	DW_DEVICE_HOST_SLEEP		= 6,
562*4882a593Smuzhiyun 	DW_DEVICE_HOST_WAKE_WAIT	= 7,
563*4882a593Smuzhiyun 	DW_DEVICE_DS_D3_INFORM_WAIT	= 8
564*4882a593Smuzhiyun };
565*4882a593Smuzhiyun #endif /* PCIE_INB_DW */
566*4882a593Smuzhiyun 
567*4882a593Smuzhiyun enum dhd_prealloc_index {
568*4882a593Smuzhiyun 	DHD_PREALLOC_PROT			= 0,
569*4882a593Smuzhiyun 	DHD_PREALLOC_RXBUF			= 1,
570*4882a593Smuzhiyun 	DHD_PREALLOC_DATABUF			= 2,
571*4882a593Smuzhiyun 	DHD_PREALLOC_OSL_BUF			= 3,
572*4882a593Smuzhiyun 	DHD_PREALLOC_SKB_BUF = 4,
573*4882a593Smuzhiyun 	DHD_PREALLOC_WIPHY_ESCAN0		= 5,
574*4882a593Smuzhiyun 	DHD_PREALLOC_WIPHY_ESCAN1		= 6,
575*4882a593Smuzhiyun 	DHD_PREALLOC_DHD_INFO			= 7,
576*4882a593Smuzhiyun 	DHD_PREALLOC_DHD_WLFC_INFO		= 8,
577*4882a593Smuzhiyun 	DHD_PREALLOC_IF_FLOW_LKUP		= 9,
578*4882a593Smuzhiyun 	/* 10 */
579*4882a593Smuzhiyun 	DHD_PREALLOC_MEMDUMP_RAM		= 11,
580*4882a593Smuzhiyun 	DHD_PREALLOC_DHD_WLFC_HANGER		= 12,
581*4882a593Smuzhiyun 	DHD_PREALLOC_PKTID_MAP			= 13,
582*4882a593Smuzhiyun 	DHD_PREALLOC_PKTID_MAP_IOCTL		= 14,
583*4882a593Smuzhiyun 	DHD_PREALLOC_DHD_LOG_DUMP_BUF		= 15,
584*4882a593Smuzhiyun 	DHD_PREALLOC_DHD_LOG_DUMP_BUF_EX	= 16,
585*4882a593Smuzhiyun 	DHD_PREALLOC_DHD_PKTLOG_DUMP_BUF	= 17,
586*4882a593Smuzhiyun 	DHD_PREALLOC_STAT_REPORT_BUF = 18,
587*4882a593Smuzhiyun 	DHD_PREALLOC_WL_ESCAN = 19,
588*4882a593Smuzhiyun 	DHD_PREALLOC_FW_VERBOSE_RING = 20,
589*4882a593Smuzhiyun 	DHD_PREALLOC_FW_EVENT_RING = 21,
590*4882a593Smuzhiyun 	DHD_PREALLOC_DHD_EVENT_RING = 22,
591*4882a593Smuzhiyun 	DHD_PREALLOC_NAN_EVENT_RING = 23
592*4882a593Smuzhiyun };
593*4882a593Smuzhiyun 
594*4882a593Smuzhiyun enum dhd_dongledump_mode {
595*4882a593Smuzhiyun 	DUMP_DISABLED		= 0,
596*4882a593Smuzhiyun 	DUMP_MEMONLY		= 1,
597*4882a593Smuzhiyun 	DUMP_MEMFILE		= 2,
598*4882a593Smuzhiyun 	DUMP_MEMFILE_BUGON	= 3,
599*4882a593Smuzhiyun 	DUMP_MEMFILE_MAX	= 4
600*4882a593Smuzhiyun };
601*4882a593Smuzhiyun 
602*4882a593Smuzhiyun enum dhd_dongledump_type {
603*4882a593Smuzhiyun 	DUMP_TYPE_RESUMED_ON_TIMEOUT		= 1,
604*4882a593Smuzhiyun 	DUMP_TYPE_D3_ACK_TIMEOUT		= 2,
605*4882a593Smuzhiyun 	DUMP_TYPE_DONGLE_TRAP			= 3,
606*4882a593Smuzhiyun 	DUMP_TYPE_MEMORY_CORRUPTION		= 4,
607*4882a593Smuzhiyun 	DUMP_TYPE_PKTID_AUDIT_FAILURE		= 5,
608*4882a593Smuzhiyun 	DUMP_TYPE_PKTID_INVALID			= 6,
609*4882a593Smuzhiyun 	DUMP_TYPE_SCAN_TIMEOUT			= 7,
610*4882a593Smuzhiyun 	DUMP_TYPE_SCAN_BUSY			= 8,
611*4882a593Smuzhiyun 	DUMP_TYPE_BY_SYSDUMP			= 9,
612*4882a593Smuzhiyun 	DUMP_TYPE_BY_LIVELOCK			= 10,
613*4882a593Smuzhiyun 	DUMP_TYPE_AP_LINKUP_FAILURE		= 11,
614*4882a593Smuzhiyun 	DUMP_TYPE_AP_ABNORMAL_ACCESS		= 12,
615*4882a593Smuzhiyun 	DUMP_TYPE_CFG_VENDOR_TRIGGERED		= 13,
616*4882a593Smuzhiyun 	DUMP_TYPE_RESUMED_ON_TIMEOUT_TX		= 14,
617*4882a593Smuzhiyun 	DUMP_TYPE_RESUMED_ON_TIMEOUT_RX		= 15,
618*4882a593Smuzhiyun 	DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR	= 16,
619*4882a593Smuzhiyun 	DUMP_TYPE_TRANS_ID_MISMATCH		= 17,
620*4882a593Smuzhiyun 	DUMP_TYPE_IFACE_OP_FAILURE		= 18,
621*4882a593Smuzhiyun 	DUMP_TYPE_DONGLE_INIT_FAILURE		= 19,
622*4882a593Smuzhiyun 	DUMP_TYPE_READ_SHM_FAIL			= 20,
623*4882a593Smuzhiyun 	DUMP_TYPE_DONGLE_HOST_EVENT		= 21,
624*4882a593Smuzhiyun 	DUMP_TYPE_SMMU_FAULT			= 22,
625*4882a593Smuzhiyun 	DUMP_TYPE_RESUMED_UNKNOWN		= 23,
626*4882a593Smuzhiyun 	DUMP_TYPE_DUE_TO_BT			= 24,
627*4882a593Smuzhiyun 	DUMP_TYPE_LOGSET_BEYOND_RANGE		= 25,
628*4882a593Smuzhiyun 	DUMP_TYPE_BY_USER			= 26,
629*4882a593Smuzhiyun 	DUMP_TYPE_CTO_RECOVERY			= 27,
630*4882a593Smuzhiyun 	DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR	= 28,
631*4882a593Smuzhiyun 	DUMP_TYPE_PROXD_TIMEOUT			= 29,
632*4882a593Smuzhiyun 	DUMP_TYPE_INBAND_DEVICE_WAKE_FAILURE	= 30,
633*4882a593Smuzhiyun 	DUMP_TYPE_PKTID_POOL_DEPLETED		= 31,
634*4882a593Smuzhiyun 	DUMP_TYPE_ESCAN_SYNCID_MISMATCH		= 32,
635*4882a593Smuzhiyun 	DUMP_TYPE_INVALID_SHINFO_NRFRAGS	= 33
636*4882a593Smuzhiyun };
637*4882a593Smuzhiyun 
638*4882a593Smuzhiyun enum dhd_hang_reason {
639*4882a593Smuzhiyun 	HANG_REASON_MASK				= 0x8000,
640*4882a593Smuzhiyun 	HANG_REASON_IOCTL_RESP_TIMEOUT			= 0x8001,
641*4882a593Smuzhiyun 	HANG_REASON_DONGLE_TRAP				= 0x8002,
642*4882a593Smuzhiyun 	HANG_REASON_D3_ACK_TIMEOUT			= 0x8003,
643*4882a593Smuzhiyun 	HANG_REASON_BUS_DOWN				= 0x8004,
644*4882a593Smuzhiyun 	HANG_REASON_MSGBUF_LIVELOCK			= 0x8006,
645*4882a593Smuzhiyun 	HANG_REASON_IFACE_DEL_FAILURE			= 0x8007,
646*4882a593Smuzhiyun 	HANG_REASON_HT_AVAIL_ERROR			= 0x8008,
647*4882a593Smuzhiyun 	HANG_REASON_PCIE_RC_LINK_UP_FAIL		= 0x8009,
648*4882a593Smuzhiyun 	HANG_REASON_PCIE_PKTID_ERROR			= 0x800A,
649*4882a593Smuzhiyun 	HANG_REASON_IFACE_ADD_FAILURE			= 0x800B,
650*4882a593Smuzhiyun 	HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR	= 0x800C,
651*4882a593Smuzhiyun 	HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR		= 0x800D,
652*4882a593Smuzhiyun 	HANG_REASON_SEQUENTIAL_PRIVCMD_ERROR		= 0x800E,
653*4882a593Smuzhiyun 	HANG_REASON_SCAN_BUSY				= 0x800F,
654*4882a593Smuzhiyun 	HANG_REASON_BSS_UP_FAILURE			= 0x8010,
655*4882a593Smuzhiyun 	HANG_REASON_BSS_DOWN_FAILURE			= 0x8011,
656*4882a593Smuzhiyun 	HANG_REASON_IOCTL_SUSPEND_ERROR			= 0x8012,
657*4882a593Smuzhiyun 	HANG_REASON_ESCAN_SYNCID_MISMATCH		= 0x8013,
658*4882a593Smuzhiyun 	HANG_REASON_PCIE_LINK_DOWN_RC_DETECT		= 0x8805,
659*4882a593Smuzhiyun 	HANG_REASON_INVALID_EVENT_OR_DATA		= 0x8806,
660*4882a593Smuzhiyun 	HANG_REASON_UNKNOWN				= 0x8807,
661*4882a593Smuzhiyun 	HANG_REASON_PCIE_LINK_DOWN_EP_DETECT		= 0x8808,
662*4882a593Smuzhiyun 	HANG_REASON_PCIE_CTO_DETECT			= 0x8809,
663*4882a593Smuzhiyun 	HANG_REASON_MAX					= 0x880A
664*4882a593Smuzhiyun };
665*4882a593Smuzhiyun 
666*4882a593Smuzhiyun #define WLC_E_DEAUTH_MAX_REASON 0x0FFF
667*4882a593Smuzhiyun 
668*4882a593Smuzhiyun enum dhd_rsdb_scan_features {
669*4882a593Smuzhiyun 	/* Downgraded scan feature for AP active */
670*4882a593Smuzhiyun 	RSDB_SCAN_DOWNGRADED_AP_SCAN = 0x01,
671*4882a593Smuzhiyun 	/* Downgraded scan feature for P2P Discovery */
672*4882a593Smuzhiyun 	RSDB_SCAN_DOWNGRADED_P2P_DISC_SCAN = 0x02,
673*4882a593Smuzhiyun 	/* Enable channel pruning for ROAM SCAN */
674*4882a593Smuzhiyun 	RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM = 0x10,
675*4882a593Smuzhiyun 	/* Enable channel pruning for any SCAN */
676*4882a593Smuzhiyun 	RSDB_SCAN_DOWNGRADED_CH_PRUNE_ALL  = 0x20
677*4882a593Smuzhiyun };
678*4882a593Smuzhiyun 
679*4882a593Smuzhiyun #define VENDOR_SEND_HANG_EXT_INFO_LEN (800 + 1)
680*4882a593Smuzhiyun #ifdef DHD_EWPR_VER2
681*4882a593Smuzhiyun #define VENDOR_SEND_HANG_EXT_INFO_VER 20181111
682*4882a593Smuzhiyun #else
683*4882a593Smuzhiyun #define VENDOR_SEND_HANG_EXT_INFO_VER 20170905
684*4882a593Smuzhiyun #endif /* DHD_EWPR_VER2 */
685*4882a593Smuzhiyun 
686*4882a593Smuzhiyun #define HANG_INFO_TRAP_T_NAME_MAX 6
687*4882a593Smuzhiyun #define HANG_INFO_TRAP_T_REASON_IDX 0
688*4882a593Smuzhiyun #define HANG_INFO_TRAP_T_SUBTYPE_IDX 2
689*4882a593Smuzhiyun #define HANG_INFO_TRAP_T_OFFSET_IDX 3
690*4882a593Smuzhiyun #define HANG_INFO_TRAP_T_EPC_IDX 4
691*4882a593Smuzhiyun #define HANG_FIELD_STR_MAX_LEN 9
692*4882a593Smuzhiyun #define HANG_FIELD_CNT_MAX 69
693*4882a593Smuzhiyun #define HANG_FIELD_IF_FAILURE_CNT 10
694*4882a593Smuzhiyun #define HANG_FIELD_IOCTL_RESP_TIMEOUT_CNT 8
695*4882a593Smuzhiyun #define HANG_FIELD_TRAP_T_STACK_CNT_MAX 16
696*4882a593Smuzhiyun #define HANG_FIELD_MISMATCH_CNT 10
697*4882a593Smuzhiyun #define HANG_INFO_BIGDATA_KEY_STACK_CNT 4
698*4882a593Smuzhiyun 
699*4882a593Smuzhiyun #define DEBUG_DUMP_TIME_BUF_LEN (16 + 1)
700*4882a593Smuzhiyun /* delimiter between values */
701*4882a593Smuzhiyun #define HANG_KEY_DEL    ' '
702*4882a593Smuzhiyun #define HANG_RAW_DEL    '_'
703*4882a593Smuzhiyun 
704*4882a593Smuzhiyun #ifdef DHD_EWPR_VER2
705*4882a593Smuzhiyun #define HANG_INFO_BIGDATA_EXTRA_KEY 4
706*4882a593Smuzhiyun #define HANG_INFO_TRAP_T_EXTRA_KEY_IDX 5
707*4882a593Smuzhiyun #endif
708*4882a593Smuzhiyun 
709*4882a593Smuzhiyun /* Packet alignment for most efficient SDIO (can change based on platform) */
710*4882a593Smuzhiyun #ifndef DHD_SDALIGN
711*4882a593Smuzhiyun #define DHD_SDALIGN	32
712*4882a593Smuzhiyun #endif
713*4882a593Smuzhiyun 
714*4882a593Smuzhiyun #define DHD_TX_CONTEXT_MASK 0xff
715*4882a593Smuzhiyun #define DHD_TX_START_XMIT   0x01
716*4882a593Smuzhiyun #define DHD_TX_SEND_PKT     0x02
717*4882a593Smuzhiyun #define DHD_IF_SET_TX_ACTIVE(ifp, context)	\
718*4882a593Smuzhiyun     ifp->tx_paths_active |= context;
719*4882a593Smuzhiyun #define DHD_IF_CLR_TX_ACTIVE(ifp, context)	\
720*4882a593Smuzhiyun     ifp->tx_paths_active &= ~context;
721*4882a593Smuzhiyun #define DHD_IF_IS_TX_ACTIVE(ifp)	\
722*4882a593Smuzhiyun 	(ifp->tx_paths_active)
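
/*
 * Usage sketch (illustrative only): a transmit entry point marks the
 * interface active for the duration of the call, e.g.:
 *
 *	DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_START_XMIT);
 *	... hand the packet to the bus ...
 *	DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_START_XMIT);
 */
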
723*4882a593Smuzhiyun /**
724*4882a593Smuzhiyun  * DMA-able buffer parameters
725*4882a593Smuzhiyun  * - dmaaddr_t is 32bits on a 32bit host.
726*4882a593Smuzhiyun  *   dhd_dma_buf::pa may not be used as a sh_addr_t, bcm_addr64_t or uintptr
727*4882a593Smuzhiyun  * - dhd_dma_buf::_alloced is ONLY for freeing a DMA-able buffer.
728*4882a593Smuzhiyun  */
729*4882a593Smuzhiyun typedef struct dhd_dma_buf {
730*4882a593Smuzhiyun 	void      *va;      /* virtual address of buffer */
731*4882a593Smuzhiyun 	uint32    len;      /* user requested buffer length */
732*4882a593Smuzhiyun 	dmaaddr_t pa;       /* physical address of buffer */
733*4882a593Smuzhiyun 	void      *dmah;    /* dma mapper handle */
734*4882a593Smuzhiyun 	void      *secdma;  /* secure dma sec_cma_info handle */
735*4882a593Smuzhiyun 	uint32    _alloced; /* actual size of buffer allocated with align and pad */
736*4882a593Smuzhiyun } dhd_dma_buf_t;
737*4882a593Smuzhiyun 
738*4882a593Smuzhiyun /* host packet-reordering logic */
739*4882a593Smuzhiyun /* the following structure holds the reorder buffers (void **p) */
740*4882a593Smuzhiyun typedef struct reorder_info {
741*4882a593Smuzhiyun 	void **p;
742*4882a593Smuzhiyun 	uint8 flow_id;
743*4882a593Smuzhiyun 	uint8 cur_idx;
744*4882a593Smuzhiyun 	uint8 exp_idx;
745*4882a593Smuzhiyun 	uint8 max_idx;
746*4882a593Smuzhiyun 	uint8 pend_pkts;
747*4882a593Smuzhiyun } reorder_info_t;
748*4882a593Smuzhiyun 
749*4882a593Smuzhiyun /* throughput test packet format */
750*4882a593Smuzhiyun typedef struct tput_pkt {
751*4882a593Smuzhiyun 	/* header */
752*4882a593Smuzhiyun 	uint8 mac_sta[ETHER_ADDR_LEN];
753*4882a593Smuzhiyun 	uint8 mac_ap[ETHER_ADDR_LEN];
754*4882a593Smuzhiyun 	uint16 pkt_type;
755*4882a593Smuzhiyun 	uint8 PAD[2];
756*4882a593Smuzhiyun 	/* data */
757*4882a593Smuzhiyun 	uint32 crc32;
758*4882a593Smuzhiyun 	uint32 pkt_id;
759*4882a593Smuzhiyun 	uint32 num_pkts;
760*4882a593Smuzhiyun } tput_pkt_t;
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun typedef enum {
763*4882a593Smuzhiyun 	TPUT_PKT_TYPE_NORMAL,
764*4882a593Smuzhiyun 	TPUT_PKT_TYPE_STOP
765*4882a593Smuzhiyun } tput_pkt_type_t;
766*4882a593Smuzhiyun 
767*4882a593Smuzhiyun #define TPUT_TEST_MAX_PAYLOAD 1500
768*4882a593Smuzhiyun #define TPUT_TEST_WAIT_TIMEOUT_DEFAULT 5000
769*4882a593Smuzhiyun 
770*4882a593Smuzhiyun #ifdef DHDTCPACK_SUPPRESS
771*4882a593Smuzhiyun 
772*4882a593Smuzhiyun enum {
773*4882a593Smuzhiyun 	/* TCPACK suppress off */
774*4882a593Smuzhiyun 	TCPACK_SUP_OFF,
775*4882a593Smuzhiyun 	/* Replace TCPACK in txq when new coming one has higher ACK number. */
776*4882a593Smuzhiyun 	TCPACK_SUP_REPLACE,
777*4882a593Smuzhiyun 	/* TCPACK_SUP_REPLACE + delayed TCPACK TX unless ACK to PSH DATA.
778*4882a593Smuzhiyun 	 * This benefits a half-duplex bus interface (e.g. SDIO) because
779*4882a593Smuzhiyun 	 * 1. TCP DATA packets can be read first from the bus, and
780*4882a593Smuzhiyun 	 * 2. TCPACKs that do not need to be delivered urgently remain longer in the TXQ and can be suppressed.
781*4882a593Smuzhiyun 	 */
782*4882a593Smuzhiyun 	TCPACK_SUP_DELAYTX,
783*4882a593Smuzhiyun 	TCPACK_SUP_HOLD,
784*4882a593Smuzhiyun 	TCPACK_SUP_LAST_MODE
785*4882a593Smuzhiyun };
786*4882a593Smuzhiyun #endif /* DHDTCPACK_SUPPRESS */
787*4882a593Smuzhiyun 
788*4882a593Smuzhiyun #if defined(BCM_ROUTER_DHD)
789*4882a593Smuzhiyun #define DHD_DWM_TBL_SIZE           57
790*4882a593Smuzhiyun /* DSCP WMM AC Mapping macros and structures */
791*4882a593Smuzhiyun #define DHD_TRF_MGMT_DWM_FILTER_BIT                 0x8
792*4882a593Smuzhiyun #define DHD_TRF_MGMT_DWM_PRIO_BITS                  0x7
793*4882a593Smuzhiyun #define DHD_TRF_MGMT_DWM_FAVORED_BIT                0x10
794*4882a593Smuzhiyun #define DHD_TRF_MGMT_DWM_PRIO(dwm_tbl_entry) ((dwm_tbl_entry) & DHD_TRF_MGMT_DWM_PRIO_BITS)
795*4882a593Smuzhiyun #define DHD_TRF_MGMT_DWM_IS_FAVORED_SET(dwm_tbl_entry) \
796*4882a593Smuzhiyun 	((dwm_tbl_entry) & DHD_TRF_MGMT_DWM_FAVORED_BIT)
797*4882a593Smuzhiyun #define DHD_TRF_MGMT_DWM_SET_FAVORED(dwm_tbl_entry) \
798*4882a593Smuzhiyun 	((dwm_tbl_entry) |= DHD_TRF_MGMT_DWM_FAVORED_BIT)
799*4882a593Smuzhiyun #define DHD_TRF_MGMT_DWM_IS_FILTER_SET(dwm_tbl_entry) \
800*4882a593Smuzhiyun 	((dwm_tbl_entry) & DHD_TRF_MGMT_DWM_FILTER_BIT)
801*4882a593Smuzhiyun #define DHD_TRF_MGMT_DWM_SET_FILTER(dwm_tbl_entry) \
802*4882a593Smuzhiyun 	((dwm_tbl_entry) |= DHD_TRF_MGMT_DWM_FILTER_BIT)
803*4882a593Smuzhiyun 
804*4882a593Smuzhiyun typedef struct {
805*4882a593Smuzhiyun 	uint8 dhd_dwm_enabled;
806*4882a593Smuzhiyun 	uint8 dhd_dwm_tbl[DHD_DWM_TBL_SIZE];
807*4882a593Smuzhiyun } dhd_trf_mgmt_dwm_tbl_t;
808*4882a593Smuzhiyun #endif /* for BCM_ROUTER_DHD */
809*4882a593Smuzhiyun 
810*4882a593Smuzhiyun #define DHD_NULL_CHK_AND_RET(cond) \
811*4882a593Smuzhiyun 	if (!cond) { \
812*4882a593Smuzhiyun 		DHD_ERROR(("%s " #cond " is NULL\n", __FUNCTION__)); \
813*4882a593Smuzhiyun 		return; \
814*4882a593Smuzhiyun 	}
815*4882a593Smuzhiyun 
816*4882a593Smuzhiyun #define DHD_NULL_CHK_AND_RET_VAL(cond, value) \
817*4882a593Smuzhiyun 	if (!cond) { \
818*4882a593Smuzhiyun 		DHD_ERROR(("%s " #cond " is NULL\n", __FUNCTION__)); \
819*4882a593Smuzhiyun 		return value; \
820*4882a593Smuzhiyun 	}
821*4882a593Smuzhiyun 
822*4882a593Smuzhiyun #define DHD_NULL_CHK_AND_GOTO(cond, label) \
823*4882a593Smuzhiyun 	if (!cond) { \
824*4882a593Smuzhiyun 		DHD_ERROR(("%s " #cond " is NULL\n", __FUNCTION__)); \
825*4882a593Smuzhiyun 		goto label; \
826*4882a593Smuzhiyun 	}
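
/*
 * Usage sketch (illustrative only); the function below is hypothetical.
 * Note that "cond" is expanded without parentheses, so pass a plain pointer
 * rather than a complex expression.
 *
 *	static void dhd_example_notify(dhd_pub_t *dhdp)
 *	{
 *		DHD_NULL_CHK_AND_RET(dhdp);
 *		... use dhdp ...
 *	}
 */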
827*4882a593Smuzhiyun 
828*4882a593Smuzhiyun /*
829*4882a593Smuzhiyun  * Accumulating the queue lengths of all flowring queues in a parent object,
830*4882a593Smuzhiyun  * to assert flow control when the cumulative queue length crosses an upper
831*4882a593Smuzhiyun  * threshold defined on a parent object. The upper threshold may be maintained
832*4882a593Smuzhiyun  * at a station level, at an interface level, or per dhd instance.
833*4882a593Smuzhiyun  *
834*4882a593Smuzhiyun  * cumm_ctr_t abstraction:
835*4882a593Smuzhiyun  * The cumm_ctr_t abstraction may be enhanced to use an object with a hysteresis
836*4882a593Smuzhiyun  * pause on/off threshold callback.
837*4882a593Smuzhiyun  * All macros use the address of the cumulative length in the parent objects.
838*4882a593Smuzhiyun  *
839*4882a593Smuzhiyun  * Cumulative counters in parent objects may be updated without spinlocks.
840*4882a593Smuzhiyun  *
841*4882a593Smuzhiyun  * If a cumulative queue length is desired across all flows
842*4882a593Smuzhiyun  * belonging to any of (a station, an interface, or a dhd instance), then
843*4882a593Smuzhiyun  * an atomic operation is required, using atomic_t cumulative counters or
844*4882a593Smuzhiyun  * a spinlock. BCM_ROUTER_DHD uses the Linux atomic_t construct.
845*4882a593Smuzhiyun  */
846*4882a593Smuzhiyun #if defined(BCM_ROUTER_DHD)
847*4882a593Smuzhiyun 
848*4882a593Smuzhiyun typedef atomic_t cumm_ctr_t;       /* BCM_ROUTER_DHD Linux: atomic operations */
849*4882a593Smuzhiyun #define DHD_CUMM_CTR_PTR(clen)     ((cumm_ctr_t*)(clen))
850*4882a593Smuzhiyun #define DHD_CUMM_CTR(clen)         DHD_CUMM_CTR_PTR(clen) /* atomic accessor */
851*4882a593Smuzhiyun #define DHD_CUMM_CTR_READ(clen)    atomic_read(DHD_CUMM_CTR(clen)) /* read */
852*4882a593Smuzhiyun #define DHD_CUMM_CTR_INIT(clen)                                                \
853*4882a593Smuzhiyun 	ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL));                  \
854*4882a593Smuzhiyun 	atomic_set(DHD_CUMM_CTR(clen), 0);
855*4882a593Smuzhiyun #define DHD_CUMM_CTR_INCR(clen)                                                \
856*4882a593Smuzhiyun 	ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL));                  \
857*4882a593Smuzhiyun 	atomic_add(1, DHD_CUMM_CTR(clen));                                         \
858*4882a593Smuzhiyun 	ASSERT(DHD_CUMM_CTR_READ(clen) != 0); /* ensure it does not wrap */
859*4882a593Smuzhiyun #define DHD_CUMM_CTR_DECR(clen)                                                \
860*4882a593Smuzhiyun 	ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL));                  \
861*4882a593Smuzhiyun 	ASSERT(DHD_CUMM_CTR_READ(clen) > 0);                                       \
862*4882a593Smuzhiyun 	atomic_sub(1, DHD_CUMM_CTR(clen));
863*4882a593Smuzhiyun 
864*4882a593Smuzhiyun #else  /* ! BCM_ROUTER_DHD */
865*4882a593Smuzhiyun 
866*4882a593Smuzhiyun /* Cumulative length not supported. */
867*4882a593Smuzhiyun typedef uint32 cumm_ctr_t;
868*4882a593Smuzhiyun #define DHD_CUMM_CTR_PTR(clen)     ((cumm_ctr_t*)(clen))
869*4882a593Smuzhiyun #define DHD_CUMM_CTR(clen)         *(DHD_CUMM_CTR_PTR(clen)) /* accessor */
870*4882a593Smuzhiyun #define DHD_CUMM_CTR_READ(clen)    DHD_CUMM_CTR(clen) /* read access */
871*4882a593Smuzhiyun #define DHD_CUMM_CTR_INIT(clen)                                                \
872*4882a593Smuzhiyun 	ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL));
873*4882a593Smuzhiyun #define DHD_CUMM_CTR_INCR(clen)                                                \
874*4882a593Smuzhiyun 	ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL));
875*4882a593Smuzhiyun #define DHD_CUMM_CTR_DECR(clen)                                                \
876*4882a593Smuzhiyun 	ASSERT(DHD_CUMM_CTR_PTR(clen) != DHD_CUMM_CTR_PTR(NULL));
877*4882a593Smuzhiyun 
878*4882a593Smuzhiyun #endif /* ! BCM_ROUTER_DHD */
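
/*
 * Usage sketch (illustrative only): a parent object embeds a cumm_ctr_t and
 * the flowring enqueue/dequeue paths update it through the macros above.
 * The structure and field names here are hypothetical.
 *
 *	typedef struct dhd_if_example {
 *		cumm_ctr_t cumm_ctr;	// cumulative queue length of all flows
 *	} dhd_if_example_t;
 *
 *	DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);	// at interface creation
 *	DHD_CUMM_CTR_INCR(&ifp->cumm_ctr);	// on enqueue to any flowring
 *	DHD_CUMM_CTR_DECR(&ifp->cumm_ctr);	// on dequeue or drop
 */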
879*4882a593Smuzhiyun 
880*4882a593Smuzhiyun #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
881*4882a593Smuzhiyun struct tdls_peer_node {
882*4882a593Smuzhiyun 	uint8 addr[ETHER_ADDR_LEN];
883*4882a593Smuzhiyun 	struct tdls_peer_node *next;
884*4882a593Smuzhiyun };
885*4882a593Smuzhiyun typedef struct tdls_peer_node tdls_peer_node_t;
886*4882a593Smuzhiyun typedef struct {
887*4882a593Smuzhiyun 	tdls_peer_node_t *node;
888*4882a593Smuzhiyun 	uint8 tdls_peer_count;
889*4882a593Smuzhiyun } tdls_peer_tbl_t;
890*4882a593Smuzhiyun #endif /* defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
891*4882a593Smuzhiyun 
892*4882a593Smuzhiyun typedef enum dhd_ring_id {
893*4882a593Smuzhiyun 	DEBUG_RING_ID_INVALID = 0x1,
894*4882a593Smuzhiyun 	FW_VERBOSE_RING_ID = 0x2,
895*4882a593Smuzhiyun 	DHD_EVENT_RING_ID = 0x3,
896*4882a593Smuzhiyun 	DRIVER_LOG_RING_ID = 0x4,
897*4882a593Smuzhiyun 	ROAM_STATS_RING_ID = 0x5,
898*4882a593Smuzhiyun 	BT_LOG_RING_ID = 0x6,
899*4882a593Smuzhiyun 	DEBUG_RING_ID_MAX = 0x7
900*4882a593Smuzhiyun } dhd_ring_id_t;
901*4882a593Smuzhiyun 
902*4882a593Smuzhiyun #ifdef DHD_LOG_DUMP
903*4882a593Smuzhiyun #define DUMP_SSSR_ATTR_START	2
904*4882a593Smuzhiyun #define DUMP_SSSR_ATTR_COUNT	10
905*4882a593Smuzhiyun 
906*4882a593Smuzhiyun typedef enum {
907*4882a593Smuzhiyun 	SSSR_C0_D11_BEFORE = 0,
908*4882a593Smuzhiyun 	SSSR_C0_D11_AFTER = 1,
909*4882a593Smuzhiyun 	SSSR_C1_D11_BEFORE = 2,
910*4882a593Smuzhiyun 	SSSR_C1_D11_AFTER = 3,
911*4882a593Smuzhiyun 	SSSR_C2_D11_BEFORE = 4,
912*4882a593Smuzhiyun 	SSSR_C2_D11_AFTER = 5,
913*4882a593Smuzhiyun 	SSSR_DIG_BEFORE = 6,
914*4882a593Smuzhiyun 	SSSR_DIG_AFTER = 7
915*4882a593Smuzhiyun } EWP_SSSR_DUMP;
916*4882a593Smuzhiyun 
917*4882a593Smuzhiyun typedef enum {
918*4882a593Smuzhiyun 	DLD_BUF_TYPE_GENERAL = 0,
919*4882a593Smuzhiyun 	DLD_BUF_TYPE_PRESERVE = 1,
920*4882a593Smuzhiyun 	DLD_BUF_TYPE_SPECIAL = 2,
921*4882a593Smuzhiyun 	DLD_BUF_TYPE_ECNTRS = 3,
922*4882a593Smuzhiyun 	DLD_BUF_TYPE_FILTER = 4,
923*4882a593Smuzhiyun 	DLD_BUF_TYPE_ALL = 5
924*4882a593Smuzhiyun } log_dump_type_t;
925*4882a593Smuzhiyun 
926*4882a593Smuzhiyun #ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
927*4882a593Smuzhiyun struct dhd_dbg_ring_buf
928*4882a593Smuzhiyun {
929*4882a593Smuzhiyun 	void *dhd_pub;
930*4882a593Smuzhiyun };
931*4882a593Smuzhiyun #endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
932*4882a593Smuzhiyun 
933*4882a593Smuzhiyun #define LOG_DUMP_MAGIC 0xDEB3DEB3
934*4882a593Smuzhiyun #define HEALTH_CHK_BUF_SIZE 256
935*4882a593Smuzhiyun #ifdef EWP_ECNTRS_LOGGING
936*4882a593Smuzhiyun #define ECNTR_RING_ID 0xECDB
937*4882a593Smuzhiyun #define	ECNTR_RING_NAME	"ewp_ecntr_ring"
938*4882a593Smuzhiyun #endif /* EWP_ECNTRS_LOGGING */
939*4882a593Smuzhiyun 
940*4882a593Smuzhiyun #ifdef EWP_RTT_LOGGING
941*4882a593Smuzhiyun #define	RTT_RING_ID 0xADCD
942*4882a593Smuzhiyun #define	RTT_RING_NAME	"ewp_rtt_ring"
943*4882a593Smuzhiyun #endif /* EWP_RTT_LOGGING */
944*4882a593Smuzhiyun 
945*4882a593Smuzhiyun #ifdef EWP_BCM_TRACE
946*4882a593Smuzhiyun #define	BCM_TRACE_RING_ID 0xBCBC
947*4882a593Smuzhiyun #define	BCM_TRACE_RING_NAME "ewp_bcm_trace_ring"
948*4882a593Smuzhiyun #endif /* EWP_BCM_TRACE */
949*4882a593Smuzhiyun 
950*4882a593Smuzhiyun /*
951*4882a593Smuzhiyun  * XXX: Always add new enums at the end to stay compatible with the parser;
952*4882a593Smuzhiyun  * also add a new section in split_ret of EWP_config.py
953*4882a593Smuzhiyun  */
954*4882a593Smuzhiyun typedef enum {
955*4882a593Smuzhiyun 	LOG_DUMP_SECTION_GENERAL = 0,
956*4882a593Smuzhiyun 	LOG_DUMP_SECTION_ECNTRS,
957*4882a593Smuzhiyun 	LOG_DUMP_SECTION_SPECIAL,
958*4882a593Smuzhiyun 	LOG_DUMP_SECTION_DHD_DUMP,
959*4882a593Smuzhiyun 	LOG_DUMP_SECTION_EXT_TRAP,
960*4882a593Smuzhiyun 	LOG_DUMP_SECTION_HEALTH_CHK,
961*4882a593Smuzhiyun 	LOG_DUMP_SECTION_PRESERVE,
962*4882a593Smuzhiyun 	LOG_DUMP_SECTION_COOKIE,
963*4882a593Smuzhiyun 	LOG_DUMP_SECTION_FLOWRING,
964*4882a593Smuzhiyun 	LOG_DUMP_SECTION_STATUS,
965*4882a593Smuzhiyun 	LOG_DUMP_SECTION_RTT,
966*4882a593Smuzhiyun 	LOG_DUMP_SECTION_BCM_TRACE
967*4882a593Smuzhiyun } log_dump_section_type_t;
968*4882a593Smuzhiyun 
969*4882a593Smuzhiyun /* Each section in the debug_dump log file shall begin with a header */
970*4882a593Smuzhiyun typedef struct {
971*4882a593Smuzhiyun 	uint32 magic;  /* 0xDEB3DEB3 */
972*4882a593Smuzhiyun 	uint32 type;   /* of type log_dump_section_type_t */
973*4882a593Smuzhiyun 	uint64 timestamp;
974*4882a593Smuzhiyun 	uint32 length;  /* length of the section that follows */
975*4882a593Smuzhiyun } log_dump_section_hdr_t;
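
/*
 * Usage sketch (illustrative only): a section is emitted by writing this
 * header followed by "length" bytes of payload; the values and the
 * payload_len variable below are examples.
 *
 *	log_dump_section_hdr_t hdr;
 *	hdr.magic = LOG_DUMP_MAGIC;
 *	hdr.type = LOG_DUMP_SECTION_GENERAL;
 *	hdr.timestamp = local_clock();	// any monotonic timestamp source
 *	hdr.length = payload_len;
 *	... write &hdr, then the payload, to the debug_dump file ...
 */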
976*4882a593Smuzhiyun 
977*4882a593Smuzhiyun /* the structure below describes the log dump ring buffer */
978*4882a593Smuzhiyun struct dhd_log_dump_buf
979*4882a593Smuzhiyun {
980*4882a593Smuzhiyun #if defined(LINUX) || defined(linux) || defined(ANDROID) || defined(OEM_ANDROID)
981*4882a593Smuzhiyun 	spinlock_t lock;
982*4882a593Smuzhiyun #endif
983*4882a593Smuzhiyun 	void *dhd_pub;
984*4882a593Smuzhiyun 	unsigned int enable;
985*4882a593Smuzhiyun 	unsigned int wraparound;
986*4882a593Smuzhiyun 	unsigned long max;
987*4882a593Smuzhiyun 	unsigned int remain;
988*4882a593Smuzhiyun 	char* present;
989*4882a593Smuzhiyun 	char* front;
990*4882a593Smuzhiyun 	char* buffer;
991*4882a593Smuzhiyun };
992*4882a593Smuzhiyun 
993*4882a593Smuzhiyun #define DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE	256
994*4882a593Smuzhiyun #define DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE (80 * 1024)
995*4882a593Smuzhiyun 
996*4882a593Smuzhiyun extern void dhd_log_dump_write(int type, char *binary_data,
997*4882a593Smuzhiyun 		int binary_len, const char *fmt, ...);
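
/*
 * Usage sketch (illustrative only): callers typically log formatted text to
 * one of the buffer types defined in log_dump_type_t, e.g.:
 *
 *	dhd_log_dump_write(DLD_BUF_TYPE_GENERAL, NULL, 0,
 *		"%s: bus state %d\n", __FUNCTION__, dhdp->busstate);
 *
 * Passing NULL/0 for the binary arguments logs text only; the exact
 * conventions are defined by the implementation elsewhere in the driver.
 */
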
998*4882a593Smuzhiyun #endif /* DHD_LOG_DUMP */
999*4882a593Smuzhiyun 
1000*4882a593Smuzhiyun /* DEBUG_DUMP SUB COMMAND */
1001*4882a593Smuzhiyun enum {
1002*4882a593Smuzhiyun 	CMD_DEFAULT,
1003*4882a593Smuzhiyun 	CMD_UNWANTED,
1004*4882a593Smuzhiyun 	CMD_DISCONNECTED,
1005*4882a593Smuzhiyun 	CMD_MAX
1006*4882a593Smuzhiyun };
1007*4882a593Smuzhiyun 
1008*4882a593Smuzhiyun #define DHD_LOG_DUMP_TS_MULTIPLIER_VALUE    60
1009*4882a593Smuzhiyun #define DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS    "%02d%02d%02d%02d%02d%02d%04d"
1010*4882a593Smuzhiyun #define DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSS        "%02d%02d%02d%02d%02d%02d"
1011*4882a593Smuzhiyun #define DHD_DEBUG_DUMP_TYPE		"debug_dump"
1012*4882a593Smuzhiyun #define DHD_DUMP_SUBSTR_UNWANTED	"_unwanted"
1013*4882a593Smuzhiyun #define DHD_DUMP_SUBSTR_DISCONNECTED	"_disconnected"
1014*4882a593Smuzhiyun 
1015*4882a593Smuzhiyun #ifdef DNGL_AXI_ERROR_LOGGING
1016*4882a593Smuzhiyun #define DHD_DUMP_AXI_ERROR_FILENAME	"axi_error"
1017*4882a593Smuzhiyun #define DHD_DUMP_HAL_FILENAME_SUFFIX	"_hal"
1018*4882a593Smuzhiyun #endif /* DNGL_AXI_ERROR_LOGGING */
1019*4882a593Smuzhiyun 
1020*4882a593Smuzhiyun extern void get_debug_dump_time(char *str);
1021*4882a593Smuzhiyun extern void clear_debug_dump_time(char *str);
1022*4882a593Smuzhiyun #if defined(WL_CFGVENDOR_SEND_HANG_EVENT) || defined(DHD_PKT_LOGGING)
1023*4882a593Smuzhiyun extern void copy_debug_dump_time(char *dest, char *src);
1024*4882a593Smuzhiyun #endif /* WL_CFGVENDOR_SEND_HANG_EVENT || DHD_PKT_LOGGING */
1025*4882a593Smuzhiyun 
1026*4882a593Smuzhiyun #define FW_LOGSET_MASK_ALL 0xFFFFu
1027*4882a593Smuzhiyun 
1028*4882a593Smuzhiyun #if defined(CUSTOMER_HW4)
1029*4882a593Smuzhiyun #ifndef DHD_COMMON_DUMP_PATH
1030*4882a593Smuzhiyun #define DHD_COMMON_DUMP_PATH	"/data/log/wifi/"
1031*4882a593Smuzhiyun #endif /* !DHD_COMMON_DUMP_PATH */
1032*4882a593Smuzhiyun #elif defined(CUSTOMER_HW2_DEBUG)
1033*4882a593Smuzhiyun #define DHD_COMMON_DUMP_PATH    PLATFORM_PATH
1034*4882a593Smuzhiyun #elif defined(BOARD_HIKEY)
1035*4882a593Smuzhiyun #define DHD_COMMON_DUMP_PATH	"/data/misc/wifi/"
1036*4882a593Smuzhiyun #elif defined(OEM_ANDROID) && defined(__ARM_ARCH_7A__)
1037*4882a593Smuzhiyun #define DHD_COMMON_DUMP_PATH	"/data/vendor/wifi/"
1038*4882a593Smuzhiyun #elif defined(OEM_ANDROID) /* For Brix Live Image */
1039*4882a593Smuzhiyun #define DHD_COMMON_DUMP_PATH	"/installmedia/"
1040*4882a593Smuzhiyun #else /* Default */
1041*4882a593Smuzhiyun #define DHD_COMMON_DUMP_PATH	"/root/"
1042*4882a593Smuzhiyun #endif /* CUSTOMER_HW4 */
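/*
 * Illustrative sketch (added for clarity; not part of the original source): how the dump
 * path and name pieces defined above might be combined into a debug_dump file name.
 * get_debug_dump_time() is declared above; the exact snprintf layout here is only an example.
 *
 *   char ts[DEBUG_DUMP_TIME_BUF_LEN];
 *   char fname[256];
 *   get_debug_dump_time(ts);   // timestamp string per the YYMMDDHHMMSS format macros above
 *   snprintf(fname, sizeof(fname), "%s%s_%s.txt",
 *            DHD_COMMON_DUMP_PATH, DHD_DEBUG_DUMP_TYPE, ts);
 *   // e.g. "/data/misc/wifi/debug_dump_<timestamp>.txt" on a BOARD_HIKEY build
 */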
1043*4882a593Smuzhiyun 
1044*4882a593Smuzhiyun #define DHD_MEMDUMP_LONGSTR_LEN 180
1045*4882a593Smuzhiyun 
1046*4882a593Smuzhiyun struct cntry_locales_custom {
1047*4882a593Smuzhiyun 	char iso_abbrev[WLC_CNTRY_BUF_SZ];      /* ISO 3166-1 country abbreviation */
1048*4882a593Smuzhiyun 	char custom_locale[WLC_CNTRY_BUF_SZ];   /* Custom firmware locale */
1049*4882a593Smuzhiyun 	int32 custom_locale_rev;                /* Custom locale revision, default -1 */
1050*4882a593Smuzhiyun };
1051*4882a593Smuzhiyun 
1052*4882a593Smuzhiyun #ifdef DHD_PKTTS
1053*4882a593Smuzhiyun #if defined(linux) || defined(LINUX)
1054*4882a593Smuzhiyun extern uint dhd_msgbuf_get_ipv6_id(void *pkt);
1055*4882a593Smuzhiyun #else
1056*4882a593Smuzhiyun static INLINE uint dhd_msgbuf_get_ipv6_id(void *pkt) { return 0; }
1057*4882a593Smuzhiyun #endif /* linux || LINUX */
1058*4882a593Smuzhiyun int dhd_send_msg_to_ts(struct sk_buff *skb, void *data, int size);
1059*4882a593Smuzhiyun #endif /* DHD_PKTTS */
1060*4882a593Smuzhiyun 
1061*4882a593Smuzhiyun #if defined(LINUX) || defined(linux)
1062*4882a593Smuzhiyun int dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size);
1063*4882a593Smuzhiyun #endif /* LINUX || linux */
1064*4882a593Smuzhiyun #ifdef REPORT_FATAL_TIMEOUTS
1065*4882a593Smuzhiyun typedef struct timeout_info {
1066*4882a593Smuzhiyun 	void	*scan_timer_lock;
1067*4882a593Smuzhiyun 	void	*join_timer_lock;
1068*4882a593Smuzhiyun 	void	*cmd_timer_lock;
1069*4882a593Smuzhiyun 	void	*bus_timer_lock;
1070*4882a593Smuzhiyun 	uint32 scan_timeout_val;
1071*4882a593Smuzhiyun 	uint32 join_timeout_val;
1072*4882a593Smuzhiyun 	uint32 cmd_timeout_val;
1073*4882a593Smuzhiyun 	uint32 bus_timeout_val;
1074*4882a593Smuzhiyun 	bool scan_timer_active;
1075*4882a593Smuzhiyun 	bool join_timer_active;
1076*4882a593Smuzhiyun 	bool cmd_timer_active;
1077*4882a593Smuzhiyun 	bool bus_timer_active;
1078*4882a593Smuzhiyun 	osl_timer_t *scan_timer;
1079*4882a593Smuzhiyun 	osl_timer_t *join_timer;
1080*4882a593Smuzhiyun 	osl_timer_t *cmd_timer;
1081*4882a593Smuzhiyun 	osl_timer_t *bus_timer;
1082*4882a593Smuzhiyun 	uint32 cmd_request_id;
1083*4882a593Smuzhiyun 	uint32 cmd;
1084*4882a593Smuzhiyun 	uint32 cmd_join_error;
1085*4882a593Smuzhiyun 	uint16 escan_syncid;
1086*4882a593Smuzhiyun 	bool escan_aborted;
1087*4882a593Smuzhiyun 	uint16 abort_syncid;
1088*4882a593Smuzhiyun } timeout_info_t;
1089*4882a593Smuzhiyun #endif /* REPORT_FATAL_TIMEOUTS */
1090*4882a593Smuzhiyun 
1091*4882a593Smuzhiyun #ifdef DMAMAP_STATS
1092*4882a593Smuzhiyun typedef struct dmamap_stats {
1093*4882a593Smuzhiyun 	uint64 txdata;
1094*4882a593Smuzhiyun 	uint64 txdata_sz;
1095*4882a593Smuzhiyun 	uint64 rxdata;
1096*4882a593Smuzhiyun 	uint64 rxdata_sz;
1097*4882a593Smuzhiyun 	uint64 ioctl_rx;
1098*4882a593Smuzhiyun 	uint64 ioctl_rx_sz;
1099*4882a593Smuzhiyun 	uint64 event_rx;
1100*4882a593Smuzhiyun 	uint64 event_rx_sz;
1101*4882a593Smuzhiyun 	uint64 info_rx;
1102*4882a593Smuzhiyun 	uint64 info_rx_sz;
1103*4882a593Smuzhiyun 	uint64 tsbuf_rx;
1104*4882a593Smuzhiyun 	uint64 tsbuf_rx_sz;
1105*4882a593Smuzhiyun } dma_stats_t;
1106*4882a593Smuzhiyun #endif /* DMAMAP_STATS */
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun #ifdef BT_OVER_PCIE
1109*4882a593Smuzhiyun enum dhd_bus_quiesce_state {
1110*4882a593Smuzhiyun 	DHD_QUIESCE_INIT = 0,
1111*4882a593Smuzhiyun 	REQUEST_BT_QUIESCE = 1,
1112*4882a593Smuzhiyun 	RESPONSE_BT_QUIESCE = 2,
1113*4882a593Smuzhiyun 	REQUEST_BT_RESUME = 3,
1114*4882a593Smuzhiyun 	RESPONSE_BT_RESUME = 4
1115*4882a593Smuzhiyun };
1116*4882a593Smuzhiyun #endif /* BT_OVER_PCIE */
1117*4882a593Smuzhiyun 
1118*4882a593Smuzhiyun /*  see wlfc_proto.h for tx status details */
1119*4882a593Smuzhiyun #define DHD_MAX_TX_STATUS_MSGS     9u
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun #ifdef TX_STATUS_LATENCY_STATS
1122*4882a593Smuzhiyun typedef struct dhd_if_tx_status_latency {
1123*4882a593Smuzhiyun 	/* total number of tx_status received on this interface */
1124*4882a593Smuzhiyun 	uint64 num_tx_status;
1125*4882a593Smuzhiyun 	/* cumulative tx_status latency for this interface */
1126*4882a593Smuzhiyun 	uint64 cum_tx_status_latency;
1127*4882a593Smuzhiyun } dhd_if_tx_status_latency_t;
1128*4882a593Smuzhiyun #endif /* TX_STATUS_LATENCY_STATS */
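/*
 * Illustrative sketch (added for clarity; not part of the original source): deriving the
 * average tx_status latency for an interface from the two counters above.
 *
 *   uint64 avg_latency = 0;
 *   if (if_stats->num_tx_status) {
 *       avg_latency = if_stats->cum_tx_status_latency / if_stats->num_tx_status;
 *   }
 */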
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
1131*4882a593Smuzhiyun #define AWDL_NUM_SLOTS 16u /* 0 to 15 are the AWDL slots FW operates on */
1132*4882a593Smuzhiyun #define AWDL_SLOT_MULT 4u /* AWDL slot information sent by FW is in multiples of 4 */
1133*4882a593Smuzhiyun typedef struct dhd_awdl_statistics {
1134*4882a593Smuzhiyun 	uint64 slot_start_time; /* AWDL slot start time in us */
1135*4882a593Smuzhiyun 	uint64 cum_slot_time; /* Cumulative time for which this AWDL slot was active */
1136*4882a593Smuzhiyun 	uint64 num_slots; /* Number of times this AWDL slot was active */
1137*4882a593Smuzhiyun 	uint64 cum_tx_status_latency; /* cum tx_status latency while this AWDL slot is active */
1138*4882a593Smuzhiyun 	uint64 num_tx_status; /* Num of AWDL(flowring with role as AWDL) tx status received */
1139*4882a593Smuzhiyun 	uint64 fw_cum_slot_time; /* Cumulative FW time for which this AWDL slot was active */
1140*4882a593Smuzhiyun 	uint32 fw_slot_start_time; /* AWDL slot start time sent by FW in us */
1141*4882a593Smuzhiyun #if defined(BCMDBG)
1142*4882a593Smuzhiyun 	uint32 tx_status[DHD_MAX_TX_STATUS_MSGS]; /* Dongle return val wrt TX packet sent out */
1143*4882a593Smuzhiyun #endif /* BCMDBG */
1144*4882a593Smuzhiyun } dhd_awdl_stats_t;
1145*4882a593Smuzhiyun #endif /* DHD_AWDL && AWDL_SLOT_STATS */
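/*
 * Illustrative sketch (added for clarity; not part of the original source): per-slot
 * averages that can be derived from the AWDL counters above, e.g. average active time per
 * slot occurrence and average tx_status latency while that slot was active. The awdl_stats
 * array referenced here is the one kept in dhd_pub_t when AWDL_SLOT_STATS is defined.
 *
 *   dhd_awdl_stats_t *s = &dhdp->awdl_stats[slot];   // slot in [0, AWDL_NUM_SLOTS)
 *   uint64 avg_slot_time  = s->num_slots ? (s->cum_slot_time / s->num_slots) : 0;
 *   uint64 avg_ts_latency = s->num_tx_status ?
 *           (s->cum_tx_status_latency / s->num_tx_status) : 0;
 */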
1146*4882a593Smuzhiyun 
1147*4882a593Smuzhiyun /* Bit in dhd_pub_t::gdb_proxy_stop_count set when firmware is stopped by GDB */
1148*4882a593Smuzhiyun #define GDB_PROXY_STOP_MASK 1
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun /* Enable Reserve STA flowrings only for Android */
1151*4882a593Smuzhiyun #if defined(OEM_ANDROID)
1152*4882a593Smuzhiyun #define DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
1153*4882a593Smuzhiyun #endif /* OEM_ANDROID */
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun typedef enum {
1156*4882a593Smuzhiyun 	FW_UNLOADED = 0,
1157*4882a593Smuzhiyun 	FW_DOWNLOAD_IN_PROGRESS = 1,
1158*4882a593Smuzhiyun 	FW_DOWNLOAD_DONE = 2
1159*4882a593Smuzhiyun } fw_download_status_t;
1160*4882a593Smuzhiyun 
1161*4882a593Smuzhiyun #define PCIE_DB7_MAGIC_NUMBER_ISR_TRAP	0xdead0001
1162*4882a593Smuzhiyun #define PCIE_DB7_MAGIC_NUMBER_DPC_TRAP	0xdead0002
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun typedef struct dhd_db7_info {
1165*4882a593Smuzhiyun 	bool	fw_db7w_trap;
1166*4882a593Smuzhiyun 	bool	fw_db7w_trap_inprogress;
1167*4882a593Smuzhiyun 	uint32	db7_magic_number;
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 	uint32	debug_db7_send_cnt;
1170*4882a593Smuzhiyun 	uint32	debug_db7_trap_cnt;
1171*4882a593Smuzhiyun 	uint32	debug_db7_timing_error_cnt;
1172*4882a593Smuzhiyun 	uint64	debug_db7_send_time;
1173*4882a593Smuzhiyun 	uint64	debug_db7_trap_time;
1174*4882a593Smuzhiyun 	uint64	debug_max_db7_dur;
1175*4882a593Smuzhiyun 	uint64	debug_max_db7_send_time;
1176*4882a593Smuzhiyun 	uint64	debug_max_db7_trap_time;
1177*4882a593Smuzhiyun } dhd_db7_info_t;
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun #ifdef BCMINTERNAL
1180*4882a593Smuzhiyun #ifdef DHD_FWTRACE
1181*4882a593Smuzhiyun typedef struct fwtrace_info fwtrace_info_t; /* forward declaration */
1182*4882a593Smuzhiyun #endif	/* DHD_FWTRACE */
1183*4882a593Smuzhiyun #endif	/* BCMINTERNAL */
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun typedef enum dhd_induce_error_states
1186*4882a593Smuzhiyun {
1187*4882a593Smuzhiyun 	DHD_INDUCE_ERROR_CLEAR		= 0x0,
1188*4882a593Smuzhiyun 	DHD_INDUCE_IOCTL_TIMEOUT	= 0x1,
1189*4882a593Smuzhiyun 	DHD_INDUCE_D3_ACK_TIMEOUT	= 0x2,
1190*4882a593Smuzhiyun 	DHD_INDUCE_LIVELOCK		= 0x3,
1191*4882a593Smuzhiyun 	DHD_INDUCE_DROP_OOB_IRQ		= 0x4,
1192*4882a593Smuzhiyun 	DHD_INDUCE_DROP_AXI_SIG		= 0x5,
1193*4882a593Smuzhiyun 	DHD_INDUCE_TX_BIG_PKT		= 0x6,
1194*4882a593Smuzhiyun 	DHD_INDUCE_IOCTL_SUSPEND_ERROR	= 0x7,
1195*4882a593Smuzhiyun 	/* Big hammer induction */
1196*4882a593Smuzhiyun 	DHD_INDUCE_BH_ON_FAIL_ONCE	= 0x10,
1197*4882a593Smuzhiyun 	DHD_INDUCE_BH_ON_FAIL_ALWAYS	= 0x11,
1198*4882a593Smuzhiyun 	DHD_INDUCE_BH_CBP_HANG		= 0x12,
1199*4882a593Smuzhiyun 	DHD_INDUCE_ERROR_MAX
1200*4882a593Smuzhiyun } dhd_induce_error_states_t;
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun #ifdef DHD_HP2P
1203*4882a593Smuzhiyun #define MAX_TX_HIST_BIN		16
1204*4882a593Smuzhiyun #define MAX_RX_HIST_BIN		10
1205*4882a593Smuzhiyun #define MAX_HP2P_FLOWS		16
1206*4882a593Smuzhiyun #define HP2P_PRIO		7
1207*4882a593Smuzhiyun #define HP2P_PKT_THRESH		48
1208*4882a593Smuzhiyun #define HP2P_TIME_THRESH	200
1209*4882a593Smuzhiyun #define HP2P_PKT_EXPIRY		40
1210*4882a593Smuzhiyun #define	HP2P_TIME_SCALE		32
1211*4882a593Smuzhiyun 
1212*4882a593Smuzhiyun typedef struct hp2p_info {
1213*4882a593Smuzhiyun 	void	*dhd_pub;
1214*4882a593Smuzhiyun 	uint16	flowid;
1215*4882a593Smuzhiyun 	bool	hrtimer_init;
1216*4882a593Smuzhiyun 	void	*ring;
1217*4882a593Smuzhiyun 	struct	hrtimer timer;
1218*4882a593Smuzhiyun 	uint64	num_pkt_limit;
1219*4882a593Smuzhiyun 	uint64	num_timer_limit;
1220*4882a593Smuzhiyun 	uint64	num_timer_start;
1221*4882a593Smuzhiyun 	uint64	tx_t0[MAX_TX_HIST_BIN];
1222*4882a593Smuzhiyun 	uint64	tx_t1[MAX_TX_HIST_BIN];
1223*4882a593Smuzhiyun 	uint64	rx_t0[MAX_RX_HIST_BIN];
1224*4882a593Smuzhiyun } hp2p_info_t;
1225*4882a593Smuzhiyun #endif /* DHD_HP2P */
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun #if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
1228*4882a593Smuzhiyun /* Timestamps to trace dhd_logtrace_thread() */
1229*4882a593Smuzhiyun struct dhd_logtrace_thr_ts {
1230*4882a593Smuzhiyun 	uint64 entry_time;
1231*4882a593Smuzhiyun 	uint64 sem_down_time;
1232*4882a593Smuzhiyun 	uint64 flush_time;
1233*4882a593Smuzhiyun 	uint64 unexpected_break_time;
1234*4882a593Smuzhiyun 	uint64 complete_time;
1235*4882a593Smuzhiyun };
1236*4882a593Smuzhiyun #endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
1237*4882a593Smuzhiyun 
1238*4882a593Smuzhiyun /**
1239*4882a593Smuzhiyun  * Common structure for module and instance linkage.
1240*4882a593Smuzhiyun  * Instantiated once per hardware (dongle) instance that this DHD manages.
1241*4882a593Smuzhiyun  */
1242*4882a593Smuzhiyun typedef struct dhd_pub {
1243*4882a593Smuzhiyun 	/* Linkage pointers */
1244*4882a593Smuzhiyun 	osl_t *osh;		/* OSL handle */
1245*4882a593Smuzhiyun 	struct dhd_bus *bus;	/* Bus module handle */
1246*4882a593Smuzhiyun 	struct dhd_prot *prot;	/* Protocol module handle */
1247*4882a593Smuzhiyun 	struct dhd_info  *info; /* Info module handle */
1248*4882a593Smuzhiyun 	struct dhd_dbg *dbg;	/* Debugability module handle */
1249*4882a593Smuzhiyun #if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
1250*4882a593Smuzhiyun 	struct dhd_logtrace_thr_ts logtrace_thr_ts;
1251*4882a593Smuzhiyun #endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun 	/* To NDIS developers: the structure dhd_common is redundant;
1254*4882a593Smuzhiyun 	 * please do NOT merge it back from other branches!
1255*4882a593Smuzhiyun 	 */
1256*4882a593Smuzhiyun 
1257*4882a593Smuzhiyun #ifdef BCMDBUS
1258*4882a593Smuzhiyun 	struct dbus_pub *dbus;
1259*4882a593Smuzhiyun #endif /* BCMDBUS */
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun 	/* Internal dhd items */
1262*4882a593Smuzhiyun 	bool up;		/* Driver up/down (to OS) */
1263*4882a593Smuzhiyun #ifdef WL_CFG80211
1264*4882a593Smuzhiyun 	spinlock_t up_lock;	/* Synchronization with CFG80211 down */
1265*4882a593Smuzhiyun #endif /* WL_CFG80211 */
1266*4882a593Smuzhiyun 	bool txoff;		/* Transmit flow-controlled */
1267*4882a593Smuzhiyun 	bool dongle_reset;  /* TRUE = DEVRESET put dongle into reset */
1268*4882a593Smuzhiyun 	enum dhd_bus_state busstate;
1269*4882a593Smuzhiyun 	uint dhd_bus_busy_state;	/* Bus busy state */
1270*4882a593Smuzhiyun 	uint hdrlen;		/* Total DHD header length (proto + bus) */
1271*4882a593Smuzhiyun 	uint maxctl;		/* Max size rxctl request from proto to bus */
1272*4882a593Smuzhiyun 	uint rxsz;		/* Rx buffer size bus module should use */
1273*4882a593Smuzhiyun 	uint8 wme_dp;	/* wme discard priority */
1274*4882a593Smuzhiyun #ifdef DNGL_AXI_ERROR_LOGGING
1275*4882a593Smuzhiyun 	uint32 axierror_logbuf_addr;
1276*4882a593Smuzhiyun 	bool axi_error;
1277*4882a593Smuzhiyun 	struct dhd_axi_error_dump *axi_err_dump;
1278*4882a593Smuzhiyun #endif /* DNGL_AXI_ERROR_LOGGING */
1279*4882a593Smuzhiyun 	/* Dongle media info */
1280*4882a593Smuzhiyun 	bool iswl;		/* Dongle-resident driver is wl */
1281*4882a593Smuzhiyun 	ulong drv_version;	/* Version of dongle-resident driver */
1282*4882a593Smuzhiyun 	struct ether_addr mac;	/* MAC address obtained from dongle */
1283*4882a593Smuzhiyun 	dngl_stats_t dstats;	/* Stats for dongle-based data */
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun 	/* Additional stats for the bus level */
1286*4882a593Smuzhiyun 	ulong tx_packets;	/* Data packets sent to dongle */
1287*4882a593Smuzhiyun 	ulong actual_tx_pkts;	/* Actual data packets sent to dongle */
1288*4882a593Smuzhiyun 	ulong tot_txcpl;	/* Total Tx completion received */
1289*4882a593Smuzhiyun 	ulong tx_dropped;	/* Data packets dropped in dhd */
1290*4882a593Smuzhiyun 	ulong tx_multicast;	/* Multicast data packets sent to dongle */
1291*4882a593Smuzhiyun 	ulong tx_errors;	/* Errors in sending data to dongle */
1292*4882a593Smuzhiyun 	ulong tx_ctlpkts;	/* Control packets sent to dongle */
1293*4882a593Smuzhiyun 	ulong tx_ctlerrs;	/* Errors sending control frames to dongle */
1294*4882a593Smuzhiyun 	ulong rx_packets;	/* Packets sent up the network interface */
1295*4882a593Smuzhiyun 	ulong rx_multicast;	/* Multicast packets sent up the network interface */
1296*4882a593Smuzhiyun 	ulong rx_errors;	/* Errors processing rx data packets */
1297*4882a593Smuzhiyun 	ulong rx_ctlpkts;	/* Control frames processed from dongle */
1298*4882a593Smuzhiyun 	ulong rx_ctlerrs;	/* Errors in processing rx control frames */
1299*4882a593Smuzhiyun 	ulong rx_dropped;	/* Packets dropped locally (no memory) */
1300*4882a593Smuzhiyun 	ulong rx_flushed;  /* Packets flushed due to unscheduled sendup thread */
1301*4882a593Smuzhiyun 	ulong wd_dpc_sched;   /* Number of times dhd dpc scheduled by watchdog timer */
1302*4882a593Smuzhiyun 	ulong rx_pktgetfail; /* Number of PKTGET failures in DHD on RX */
1303*4882a593Smuzhiyun 	ulong tx_pktgetfail; /* Number of PKTGET failures in DHD on TX */
1304*4882a593Smuzhiyun 	ulong rx_readahead_cnt;	/* Number of packets where header read-ahead was used. */
1305*4882a593Smuzhiyun 	ulong tx_realloc;	/* Number of tx packets we had to realloc for headroom */
1306*4882a593Smuzhiyun 	ulong fc_packets;       /* Number of flow control pkts recvd */
1307*4882a593Smuzhiyun 	ulong tx_big_packets;	/* Dropped data packets that are larger than MAX_MTU_SZ */
1308*4882a593Smuzhiyun #ifdef DMAMAP_STATS
1309*4882a593Smuzhiyun 	/* DMA Mapping statistics */
1310*4882a593Smuzhiyun 	dma_stats_t dma_stats;
1311*4882a593Smuzhiyun #endif /* DMAMAP_STATS */
1312*4882a593Smuzhiyun #ifdef WL_MONITOR
1313*4882a593Smuzhiyun 	bool monitor_enable;
1314*4882a593Smuzhiyun #endif /* WL_MONITOR */
1315*4882a593Smuzhiyun 	/* Last error return */
1316*4882a593Smuzhiyun 	int bcmerror;
1317*4882a593Smuzhiyun 	uint tickcnt;
1318*4882a593Smuzhiyun 
1319*4882a593Smuzhiyun 	/* Last error from dongle */
1320*4882a593Smuzhiyun 	int dongle_error;
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun 	uint8 country_code[WLC_CNTRY_BUF_SZ];
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun 	/* Suspend disable flag and "in suspend" flag */
1325*4882a593Smuzhiyun 	int suspend_disable_flag; /* "1" to disable all extra powersaving during suspend */
1326*4882a593Smuzhiyun 	int in_suspend;			/* flag set to 1 when early suspend called */
1327*4882a593Smuzhiyun #ifdef PNO_SUPPORT
1328*4882a593Smuzhiyun 	int pno_enable;			/* pno status : "1" is pno enable */
1329*4882a593Smuzhiyun 	int pno_suspend;		/* pno suspend status : "1" is pno suspended */
1330*4882a593Smuzhiyun #endif /* PNO_SUPPORT */
1331*4882a593Smuzhiyun 	/* DTIM skip value: the default 0 (or 1) means wake on each DTIM;
1332*4882a593Smuzhiyun 	 * 3 means skip 2 DTIMs and wake on the 3rd DTIM (9th beacon when AP DTIM is 3)
1333*4882a593Smuzhiyun 	 */
1334*4882a593Smuzhiyun 	int suspend_bcn_li_dtim;         /* bcn_li_dtim value in suspend mode */
1335*4882a593Smuzhiyun 	int early_suspended;	/* Early suspend status */
1336*4882a593Smuzhiyun #ifdef PKT_FILTER_SUPPORT
1337*4882a593Smuzhiyun 	int dhcp_in_progress;	/* DHCP period */
1338*4882a593Smuzhiyun #endif
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 	/* Pkt filter definition */
1341*4882a593Smuzhiyun 	char * pktfilter[100];
1342*4882a593Smuzhiyun 	int pktfilter_count;
1343*4882a593Smuzhiyun 
1344*4882a593Smuzhiyun 	wl_country_t dhd_cspec;		/* Current Locale info */
1345*4882a593Smuzhiyun #ifdef CUSTOM_COUNTRY_CODE
1346*4882a593Smuzhiyun 	uint dhd_cflags;
1347*4882a593Smuzhiyun #endif /* CUSTOM_COUNTRY_CODE */
1348*4882a593Smuzhiyun #if defined(DHD_BLOB_EXISTENCE_CHECK)
1349*4882a593Smuzhiyun 	bool is_blob;			/* Checking for existence of blob file */
1350*4882a593Smuzhiyun #endif /* DHD_BLOB_EXISTENCE_CHECK */
1351*4882a593Smuzhiyun 	bool force_country_change;
1352*4882a593Smuzhiyun 	int	op_mode;				/* STA, HostAPD, WFD, SoftAP */
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun #if defined(LINUX) || defined(linux)
1355*4882a593Smuzhiyun #if defined(OEM_ANDROID)
1356*4882a593Smuzhiyun 	struct mutex wl_start_stop_lock; /* lock/unlock for Android start/stop */
1357*4882a593Smuzhiyun 	struct mutex wl_softap_lock;		 /* lock/unlock for any SoftAP/STA settings */
1358*4882a593Smuzhiyun #endif /* defined(OEM_ANDROID) */
1359*4882a593Smuzhiyun #endif /* defined (LINUX) || defined(linux) */
1360*4882a593Smuzhiyun 
1361*4882a593Smuzhiyun #ifdef NDIS
1362*4882a593Smuzhiyun 	PDEVICE_OBJECT pdo;
1363*4882a593Smuzhiyun 	PDEVICE_OBJECT fdo;
1364*4882a593Smuzhiyun 	PDEVICE_OBJECT nextDeviceObj;
1365*4882a593Smuzhiyun #if defined(BCMWDF)
1366*4882a593Smuzhiyun 	WDFDEVICE wdfDevice;
1367*4882a593Smuzhiyun #endif /* (BCMWDF)  */
1368*4882a593Smuzhiyun #endif /* NDIS */
1369*4882a593Smuzhiyun #ifdef PROP_TXSTATUS
1370*4882a593Smuzhiyun 	bool	wlfc_enabled;
1371*4882a593Smuzhiyun 	int	wlfc_mode;
1372*4882a593Smuzhiyun 	void*	wlfc_state;
1373*4882a593Smuzhiyun 	/*
1374*4882a593Smuzhiyun 	Mode in which the dhd flow control shall operate. Must be set before
1375*4882a593Smuzhiyun 	traffic starts to the device.
1376*4882a593Smuzhiyun 	0 - Do not do any proptxstatus flow control
1377*4882a593Smuzhiyun 	1 - Use implied credit from a packet status
1378*4882a593Smuzhiyun 	2 - Use explicit credit
1379*4882a593Smuzhiyun 	3 - Only AMPDU hostreorder is used; no wlfc.
1380*4882a593Smuzhiyun 	*/
1381*4882a593Smuzhiyun 	uint8	proptxstatus_mode;
1382*4882a593Smuzhiyun 	bool	proptxstatus_txoff;
1383*4882a593Smuzhiyun 	bool	proptxstatus_module_ignore;
1384*4882a593Smuzhiyun 	bool	proptxstatus_credit_ignore;
1385*4882a593Smuzhiyun 	bool	proptxstatus_txstatus_ignore;
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun 	bool	wlfc_rxpkt_chk;
1388*4882a593Smuzhiyun #ifdef LIMIT_BORROW
1389*4882a593Smuzhiyun 	bool wlfc_borrow_allowed;
1390*4882a593Smuzhiyun #endif /* LIMIT_BORROW */
1391*4882a593Smuzhiyun 	/*
1392*4882a593Smuzhiyun 	 * Implement the functions below in each platform if needed.
1393*4882a593Smuzhiyun 	 */
1394*4882a593Smuzhiyun 	/* platform specific function whether to skip flow control */
1395*4882a593Smuzhiyun 	bool (*skip_fc)(void * dhdp, uint8 ifx);
1396*4882a593Smuzhiyun 	/* platform specific function for wlfc_enable and wlfc_deinit */
1397*4882a593Smuzhiyun 	void (*plat_init)(void *dhd);
1398*4882a593Smuzhiyun 	void (*plat_deinit)(void *dhd);
1399*4882a593Smuzhiyun #ifdef DHD_WLFC_THREAD
1400*4882a593Smuzhiyun 	bool                wlfc_thread_go;
1401*4882a593Smuzhiyun #if defined(LINUX)
1402*4882a593Smuzhiyun 	struct task_struct* wlfc_thread;
1403*4882a593Smuzhiyun 	wait_queue_head_t   wlfc_wqhead;
1404*4882a593Smuzhiyun #else
1405*4882a593Smuzhiyun 	#error "wlfc thread not enabled"
1406*4882a593Smuzhiyun #endif /* LINUX */
1407*4882a593Smuzhiyun #endif /* DHD_WLFC_THREAD */
1408*4882a593Smuzhiyun #endif /* PROP_TXSTATUS */
1409*4882a593Smuzhiyun #ifdef PNO_SUPPORT
1410*4882a593Smuzhiyun 	void *pno_state;
1411*4882a593Smuzhiyun #endif
1412*4882a593Smuzhiyun #ifdef RTT_SUPPORT
1413*4882a593Smuzhiyun 	void *rtt_state;
1414*4882a593Smuzhiyun 	bool rtt_supported;
1415*4882a593Smuzhiyun #endif
1416*4882a593Smuzhiyun #ifdef ROAM_AP_ENV_DETECTION
1417*4882a593Smuzhiyun 	bool	roam_env_detection;
1418*4882a593Smuzhiyun #endif
1419*4882a593Smuzhiyun 	bool	dongle_isolation;
1420*4882a593Smuzhiyun 	bool	is_pcie_watchdog_reset;
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun /* Begin - Variables to track Bus Errors */
1423*4882a593Smuzhiyun 	bool	dongle_trap_occured;	/* flag for sending HANG event to upper layer */
1424*4882a593Smuzhiyun #ifdef BT_OVER_PCIE
1425*4882a593Smuzhiyun 	bool	dongle_trap_due_to_bt; /* flag to indicate that dongle has trapped due to BT */
1426*4882a593Smuzhiyun #endif /* BT_OVER_PCIE */
1427*4882a593Smuzhiyun 	bool	iovar_timeout_occured;	/* flag to indicate iovar resumed on timeout */
1428*4882a593Smuzhiyun 	bool	invalid_shinfo_nrfrags;	/* flag to indicate invalid shinfo nrfrags */
1429*4882a593Smuzhiyun 	bool	is_sched_error;		/* flag to indicate timeout due to scheduling issue */
1430*4882a593Smuzhiyun #ifdef PCIE_FULL_DONGLE
1431*4882a593Smuzhiyun 	bool	d3ack_timeout_occured;	/* flag to indicate d3ack resumed on timeout */
1432*4882a593Smuzhiyun 	bool	livelock_occured;	/* flag to indicate livelock occurred */
1433*4882a593Smuzhiyun 	bool	pktid_audit_failed;	/* flag to indicate pktid audit failure */
1434*4882a593Smuzhiyun #endif /* PCIE_FULL_DONGLE */
1435*4882a593Smuzhiyun 	bool	iface_op_failed;	/* flag to indicate interface operation failed */
1436*4882a593Smuzhiyun 	bool	scan_timeout_occurred;	/* flag to indicate scan has timed out */
1437*4882a593Smuzhiyun 	bool	scan_busy_occurred;	/* flag to indicate scan busy occurred */
1438*4882a593Smuzhiyun #ifdef BT_OVER_SDIO
1439*4882a593Smuzhiyun 	bool	is_bt_recovery_required;
1440*4882a593Smuzhiyun #endif
1441*4882a593Smuzhiyun 	bool	smmu_fault_occurred;	/* flag to indicate SMMU Fault */
1442*4882a593Smuzhiyun /*
1443*4882a593Smuzhiyun  * Add any new variables to track Bus errors above
1444*4882a593Smuzhiyun  * this line. Also ensure that the variable is
1445*4882a593Smuzhiyun  * cleared in dhd_clear_bus_errors
1446*4882a593Smuzhiyun  */
1447*4882a593Smuzhiyun /* End - Variables to track Bus Errors */
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun 	int   hang_was_sent;
1450*4882a593Smuzhiyun 	int   hang_was_pending;
1451*4882a593Smuzhiyun 	int   rxcnt_timeout;		/* counter rxcnt timeout to send HANG */
1452*4882a593Smuzhiyun 	int   txcnt_timeout;		/* counter txcnt timeout to send HANG */
1453*4882a593Smuzhiyun #ifdef BCMPCIE
1454*4882a593Smuzhiyun 	int   d3ackcnt_timeout;		/* counter d3ack timeout to send HANG */
1455*4882a593Smuzhiyun #endif /* BCMPCIE */
1456*4882a593Smuzhiyun 	bool hang_report;		/* enable hang report by default */
1457*4882a593Smuzhiyun 	uint16 hang_reason;		/* reason codes for HANG event */
1458*4882a593Smuzhiyun #if defined(DHD_HANG_SEND_UP_TEST)
1459*4882a593Smuzhiyun 	uint req_hang_type;
1460*4882a593Smuzhiyun #endif /* DHD_HANG_SEND_UP_TEST */
1461*4882a593Smuzhiyun #ifdef DHD_DETECT_CONSECUTIVE_MFG_HANG
1462*4882a593Smuzhiyun 	uint hang_count;
1463*4882a593Smuzhiyun #endif /* DHD_DETECT_CONSECUTIVE_MFG_HANG */
1464*4882a593Smuzhiyun #ifdef WLTDLS
1465*4882a593Smuzhiyun 	bool tdls_enable;
1466*4882a593Smuzhiyun #endif
1467*4882a593Smuzhiyun 	struct reorder_info *reorder_bufs[WLHOST_REORDERDATA_MAXFLOWS];
1468*4882a593Smuzhiyun 	#define WLC_IOCTL_MAXBUF_FWCAP	1024
1469*4882a593Smuzhiyun 	char  fw_capabilities[WLC_IOCTL_MAXBUF_FWCAP];
1470*4882a593Smuzhiyun 	#define DHD_IOCTL_MAXBUF_DHDCAP	1024
1471*4882a593Smuzhiyun 	char  dhd_capabilities[DHD_IOCTL_MAXBUF_DHDCAP];
1472*4882a593Smuzhiyun 	#define MAXSKBPEND 1024
1473*4882a593Smuzhiyun 	void *skbbuf[MAXSKBPEND];
1474*4882a593Smuzhiyun 	uint32 store_idx;
1475*4882a593Smuzhiyun 	uint32 sent_idx;
1476*4882a593Smuzhiyun #ifdef DHDTCPACK_SUPPRESS
1477*4882a593Smuzhiyun 	uint8 tcpack_sup_mode;		/* TCPACK suppress mode */
1478*4882a593Smuzhiyun 	void *tcpack_sup_module;	/* TCPACK suppress module */
1479*4882a593Smuzhiyun 	uint32 tcpack_sup_ratio;
1480*4882a593Smuzhiyun 	uint32 tcpack_sup_delay;
1481*4882a593Smuzhiyun #endif /* DHDTCPACK_SUPPRESS */
1482*4882a593Smuzhiyun #if defined(ARP_OFFLOAD_SUPPORT)
1483*4882a593Smuzhiyun 	uint32 arp_version;
1484*4882a593Smuzhiyun 	bool hmac_updated;
1485*4882a593Smuzhiyun #endif
1486*4882a593Smuzhiyun #if defined(BCMSUP_4WAY_HANDSHAKE)
1487*4882a593Smuzhiyun 	bool fw_4way_handshake;		/* Whether firmware will do the 4-way handshake. */
1488*4882a593Smuzhiyun #endif
1489*4882a593Smuzhiyun #ifdef BCMINTERNAL
1490*4882a593Smuzhiyun 	bool loopback; /* 1- enable loopback of tx packets, 0 - disable */
1491*4882a593Smuzhiyun #endif /* BCMINTERNAL */
1492*4882a593Smuzhiyun #ifdef DEBUG_DPC_THREAD_WATCHDOG
1493*4882a593Smuzhiyun 	bool dhd_bug_on;
1494*4882a593Smuzhiyun #endif /* DEBUG_DPC_THREAD_WATCHDOG */
1495*4882a593Smuzhiyun #ifdef CUSTOM_SET_CPUCORE
1496*4882a593Smuzhiyun 	struct task_struct * current_dpc;
1497*4882a593Smuzhiyun 	struct task_struct * current_rxf;
1498*4882a593Smuzhiyun 	int chan_isvht80;
1499*4882a593Smuzhiyun #endif /* CUSTOM_SET_CPUCORE */
1500*4882a593Smuzhiyun 	void    *sta_pool;          /* pre-allocated pool of sta objects */
1501*4882a593Smuzhiyun 	void    *staid_allocator;   /* allocator of sta indexes */
1502*4882a593Smuzhiyun #ifdef PCIE_FULL_DONGLE
1503*4882a593Smuzhiyun 	bool	flow_rings_inited;	/* set this flag after initializing flow rings */
1504*4882a593Smuzhiyun #endif /* PCIE_FULL_DONGLE */
1505*4882a593Smuzhiyun 	void    *flowid_allocator;  /* unique flowid allocator */
1506*4882a593Smuzhiyun #if defined(DHD_HTPUT_TUNABLES)
1507*4882a593Smuzhiyun 	void    *htput_flowid_allocator;  /* unique htput flowid allocator */
1508*4882a593Smuzhiyun 	uint8	htput_client_flow_rings;  /* current number of htput client flowrings */
1509*4882a593Smuzhiyun 	uint8	htput_flow_ring_start;	  /* start index of htput flow rings */
1510*4882a593Smuzhiyun #endif /* DHD_HTPUT_TUNABLES */
1511*4882a593Smuzhiyun 	void	*flow_ring_table;   /* flow ring table, include prot and bus info */
1512*4882a593Smuzhiyun 	void	*if_flow_lkup;      /* per interface flowid lkup hash table */
1513*4882a593Smuzhiyun 	void    *flowid_lock;       /* per os lock for flowid info protection */
1514*4882a593Smuzhiyun 	void    *flowring_list_lock;       /* per os lock for flowring list protection */
1515*4882a593Smuzhiyun 	uint8	max_multi_client_flow_rings;
1516*4882a593Smuzhiyun 	uint8	multi_client_flow_rings;
1517*4882a593Smuzhiyun 	uint32  num_h2d_rings;		/* Max h2d rings including static and dynamic rings */
1518*4882a593Smuzhiyun 	uint32  max_tx_flowid;		/* used to validate flowid */
1519*4882a593Smuzhiyun 	cumm_ctr_t cumm_ctr;        /* cumm queue length placeholder  */
1520*4882a593Smuzhiyun 	cumm_ctr_t l2cumm_ctr;      /* level 2 cumm queue length placeholder */
1521*4882a593Smuzhiyun 	uint32 d2h_sync_mode;       /* D2H DMA completion sync mode */
1522*4882a593Smuzhiyun 	uint8  flow_prio_map[NUMPRIO];
1523*4882a593Smuzhiyun 	uint8	flow_prio_map_type;
1524*4882a593Smuzhiyun 	char enable_log[MAX_EVENT];
1525*4882a593Smuzhiyun 	bool dma_d2h_ring_upd_support;
1526*4882a593Smuzhiyun 	bool dma_h2d_ring_upd_support;
1527*4882a593Smuzhiyun 	bool dma_ring_upd_overwrite;	/* host overwrites support setting */
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun 	bool idma_enable;
1530*4882a593Smuzhiyun 	uint idma_inited;
1531*4882a593Smuzhiyun 
1532*4882a593Smuzhiyun 	bool ifrm_enable;			/* implicit frm enable */
1533*4882a593Smuzhiyun 	uint ifrm_inited;			/* implicit frm init */
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 	bool dar_enable;		/* use DAR registers */
1536*4882a593Smuzhiyun 	uint dar_inited;
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun 	bool fast_delete_ring_support;		/* fast delete ring supported */
1539*4882a593Smuzhiyun 
1540*4882a593Smuzhiyun #ifdef DHD_WMF
1541*4882a593Smuzhiyun 	bool wmf_ucast_igmp;
1542*4882a593Smuzhiyun #ifdef DHD_IGMP_UCQUERY
1543*4882a593Smuzhiyun 	bool wmf_ucast_igmp_query;
1544*4882a593Smuzhiyun #endif
1545*4882a593Smuzhiyun #ifdef DHD_UCAST_UPNP
1546*4882a593Smuzhiyun 	bool wmf_ucast_upnp;
1547*4882a593Smuzhiyun #endif
1548*4882a593Smuzhiyun #endif /* DHD_WMF */
1549*4882a593Smuzhiyun #if defined(BCM_ROUTER_DHD)
1550*4882a593Smuzhiyun 	dhd_trf_mgmt_dwm_tbl_t dhd_tm_dwm_tbl;
1551*4882a593Smuzhiyun #endif /* BCM_ROUTER_DHD */
1552*4882a593Smuzhiyun #ifdef DHD_L2_FILTER
1553*4882a593Smuzhiyun 	unsigned long l2_filter_cnt;	/* for L2_FILTER ARP table timeout */
1554*4882a593Smuzhiyun #endif /* DHD_L2_FILTER */
1555*4882a593Smuzhiyun #ifdef DHD_SSSR_DUMP
1556*4882a593Smuzhiyun 	bool sssr_inited;
1557*4882a593Smuzhiyun 	bool sssr_dump_collected;	/* Flag to indicate sssr dump is collected */
1558*4882a593Smuzhiyun 	sssr_reg_info_cmn_t *sssr_reg_info;
1559*4882a593Smuzhiyun 	uint8 *sssr_mempool;
1560*4882a593Smuzhiyun #ifdef DHD_SSSR_DUMP_BEFORE_SR
1561*4882a593Smuzhiyun 	uint *sssr_d11_before[MAX_NUM_D11_CORES_WITH_SCAN];
1562*4882a593Smuzhiyun 	uint *sssr_dig_buf_before;
1563*4882a593Smuzhiyun #endif /* DHD_SSSR_DUMP_BEFORE_SR */
1564*4882a593Smuzhiyun 	uint *sssr_d11_after[MAX_NUM_D11_CORES_WITH_SCAN];
1565*4882a593Smuzhiyun 	bool sssr_d11_outofreset[MAX_NUM_D11_CORES_WITH_SCAN];
1566*4882a593Smuzhiyun 	uint *sssr_dig_buf_after;
1567*4882a593Smuzhiyun 	uint32 sssr_dump_mode;
1568*4882a593Smuzhiyun 	bool collect_sssr;		/* Flag to indicate SSSR dump is required */
1569*4882a593Smuzhiyun 	bool fis_triggered;
1570*4882a593Smuzhiyun #endif /* DHD_SSSR_DUMP */
1571*4882a593Smuzhiyun #ifdef DHD_SDTC_ETB_DUMP
1572*4882a593Smuzhiyun 	etb_addr_info_t etb_addr_info;
1573*4882a593Smuzhiyun 	uint8 *sdtc_etb_mempool;
1574*4882a593Smuzhiyun 	bool sdtc_etb_inited;
1575*4882a593Smuzhiyun 	bool collect_sdtc;		/* Flag to indicate SDTC dump is required */
1576*4882a593Smuzhiyun #endif /* DHD_SDTC_ETB_DUMP */
1577*4882a593Smuzhiyun 	uint8 *soc_ram;
1578*4882a593Smuzhiyun 	uint32 soc_ram_length;
1579*4882a593Smuzhiyun 	uint32 memdump_type;
1580*4882a593Smuzhiyun #ifdef DHD_COREDUMP
1581*4882a593Smuzhiyun 	char memdump_str[DHD_MEMDUMP_LONGSTR_LEN];
1582*4882a593Smuzhiyun #endif /* DHD_COREDUMP */
1583*4882a593Smuzhiyun #ifdef DHD_RND_DEBUG
1584*4882a593Smuzhiyun 	uint8 *rnd_buf;
1585*4882a593Smuzhiyun 	uint32 rnd_len;
1586*4882a593Smuzhiyun #endif /* DHD_RND_DEBUG */
1587*4882a593Smuzhiyun #ifdef DHD_FW_COREDUMP
1588*4882a593Smuzhiyun 	uint32 memdump_enabled;
1589*4882a593Smuzhiyun #ifdef DHD_DEBUG_UART
1590*4882a593Smuzhiyun 	bool memdump_success;
1591*4882a593Smuzhiyun #endif	/* DHD_DEBUG_UART */
1592*4882a593Smuzhiyun #endif /* DHD_FW_COREDUMP */
1593*4882a593Smuzhiyun #ifdef PCIE_FULL_DONGLE
1594*4882a593Smuzhiyun #ifdef WLTDLS
1595*4882a593Smuzhiyun 	tdls_peer_tbl_t peer_tbl;
1596*4882a593Smuzhiyun #endif /* WLTDLS */
1597*4882a593Smuzhiyun #if defined(LINUX) || defined(linux)
1598*4882a593Smuzhiyun 	uint8 tx_in_progress;
1599*4882a593Smuzhiyun #endif /* LINUX || linux */
1600*4882a593Smuzhiyun #endif /* PCIE_FULL_DONGLE */
1601*4882a593Smuzhiyun #ifdef CACHE_FW_IMAGES
1602*4882a593Smuzhiyun 	char	*cached_fw;
1603*4882a593Smuzhiyun 	int	cached_fw_length;
1604*4882a593Smuzhiyun 	char	*cached_nvram;
1605*4882a593Smuzhiyun 	int	cached_nvram_length;
1606*4882a593Smuzhiyun 	char	*cached_clm;
1607*4882a593Smuzhiyun 	int	cached_clm_length;
1608*4882a593Smuzhiyun 	char	*cached_txcap;
1609*4882a593Smuzhiyun 	int	cached_txcap_length;
1610*4882a593Smuzhiyun #endif
1611*4882a593Smuzhiyun #ifdef KEEP_JP_REGREV
1612*4882a593Smuzhiyun /* XXX Kept per customer request */
1613*4882a593Smuzhiyun 	char vars_ccode[WLC_CNTRY_BUF_SZ];
1614*4882a593Smuzhiyun 	uint vars_regrev;
1615*4882a593Smuzhiyun #endif /* KEEP_JP_REGREV */
1616*4882a593Smuzhiyun #ifdef WLTDLS
1617*4882a593Smuzhiyun 	uint32 tdls_mode;
1618*4882a593Smuzhiyun #endif
1619*4882a593Smuzhiyun #ifdef GSCAN_SUPPORT
1620*4882a593Smuzhiyun 	bool lazy_roam_enable;
1621*4882a593Smuzhiyun #endif
1622*4882a593Smuzhiyun #if defined(PKT_FILTER_SUPPORT) && defined(APF)
1623*4882a593Smuzhiyun 	bool apf_set;
1624*4882a593Smuzhiyun #endif /* PKT_FILTER_SUPPORT && APF */
1625*4882a593Smuzhiyun 	void *macdbg_info;
1626*4882a593Smuzhiyun #ifdef DHD_WET
1627*4882a593Smuzhiyun 	void *wet_info;
1628*4882a593Smuzhiyun #endif
1629*4882a593Smuzhiyun 	bool	h2d_phase_supported;
1630*4882a593Smuzhiyun 	bool	force_dongletrap_on_bad_h2d_phase;
1631*4882a593Smuzhiyun 	uint32	dongle_trap_data;
1632*4882a593Smuzhiyun 	fw_download_status_t	fw_download_status;
1633*4882a593Smuzhiyun 	trap_t	last_trap_info; /* trap info from the last trap */
1634*4882a593Smuzhiyun 	uint8 rand_mac_oui[DOT11_OUI_LEN];
1635*4882a593Smuzhiyun #ifdef DHD_LOSSLESS_ROAMING
1636*4882a593Smuzhiyun 	uint8 dequeue_prec_map;
1637*4882a593Smuzhiyun 	uint8 prio_8021x;
1638*4882a593Smuzhiyun #endif
1639*4882a593Smuzhiyun #ifdef WL_NATOE
1640*4882a593Smuzhiyun 	struct dhd_nfct_info *nfct;
1641*4882a593Smuzhiyun 	spinlock_t nfct_lock;
1642*4882a593Smuzhiyun #endif /* WL_NATOE */
1643*4882a593Smuzhiyun 	/* timesync link */
1644*4882a593Smuzhiyun 	struct dhd_ts *ts;
1645*4882a593Smuzhiyun 	bool	d2h_hostrdy_supported;
1646*4882a593Smuzhiyun #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1647*4882a593Smuzhiyun 	atomic_t block_bus;
1648*4882a593Smuzhiyun #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1649*4882a593Smuzhiyun #if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
1650*4882a593Smuzhiyun 	bool d11_tx_status;
1651*4882a593Smuzhiyun #endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
1652*4882a593Smuzhiyun 	uint16 ndo_version;	/* ND offload version supported */
1653*4882a593Smuzhiyun #ifdef NDO_CONFIG_SUPPORT
1654*4882a593Smuzhiyun 	bool ndo_enable;		/* ND offload feature enable */
1655*4882a593Smuzhiyun 	bool ndo_host_ip_overflow;	/* set when # of host IP addrs exceeds FW capacity */
1656*4882a593Smuzhiyun 	uint32 ndo_max_host_ip;		/* # of host ip addr supported by FW */
1657*4882a593Smuzhiyun #endif /* NDO_CONFIG_SUPPORT */
1658*4882a593Smuzhiyun #if defined(DHD_LOG_DUMP)
1659*4882a593Smuzhiyun #if defined(DHD_EFI)
1660*4882a593Smuzhiyun 	uint8 log_capture_enable;
1661*4882a593Smuzhiyun #endif /* DHD_EFI */
1662*4882a593Smuzhiyun 	/* buffer to hold 'dhd dump' data before dumping to file */
1663*4882a593Smuzhiyun 	uint8 *concise_dbg_buf;
1664*4882a593Smuzhiyun 	uint64 last_file_posn;
1665*4882a593Smuzhiyun 	int logdump_periodic_flush;
1666*4882a593Smuzhiyun #ifdef EWP_ECNTRS_LOGGING
1667*4882a593Smuzhiyun 	void *ecntr_dbg_ring;
1668*4882a593Smuzhiyun #endif
1669*4882a593Smuzhiyun #ifdef EWP_RTT_LOGGING
1670*4882a593Smuzhiyun 	void *rtt_dbg_ring;
1671*4882a593Smuzhiyun #endif
1672*4882a593Smuzhiyun #ifdef EWP_BCM_TRACE
1673*4882a593Smuzhiyun 	void *bcm_trace_dbg_ring;
1674*4882a593Smuzhiyun #endif
1675*4882a593Smuzhiyun #ifdef DNGL_EVENT_SUPPORT
1676*4882a593Smuzhiyun 	uint8 health_chk_event_data[HEALTH_CHK_BUF_SIZE];
1677*4882a593Smuzhiyun #endif
1678*4882a593Smuzhiyun 	void *logdump_cookie;
1679*4882a593Smuzhiyun #endif /* DHD_LOG_DUMP */
1680*4882a593Smuzhiyun 	uint32 dhd_console_ms; /** interval for polling the dongle for console (log) messages */
1681*4882a593Smuzhiyun 	bool ext_trap_data_supported;
1682*4882a593Smuzhiyun 	uint32 *extended_trap_data;
1683*4882a593Smuzhiyun #ifdef DUMP_IOCTL_IOV_LIST
1684*4882a593Smuzhiyun 	/* dump iovar list */
1685*4882a593Smuzhiyun 	dll_t dump_iovlist_head;
1686*4882a593Smuzhiyun 	uint8 dump_iovlist_len;
1687*4882a593Smuzhiyun #endif /* DUMP_IOCTL_IOV_LIST */
1688*4882a593Smuzhiyun #ifdef REPORT_FATAL_TIMEOUTS
1689*4882a593Smuzhiyun 	timeout_info_t *timeout_info;
1690*4882a593Smuzhiyun 	uint16 esync_id; /* used to track escans */
1691*4882a593Smuzhiyun 	osl_atomic_t set_ssid_rcvd; /* to track if WLC_E_SET_SSID is received during join IOVAR */
1692*4882a593Smuzhiyun 	bool secure_join; /* field to note that the join is secure or not */
1693*4882a593Smuzhiyun #endif /* REPORT_FATAL_TIMEOUTS */
1694*4882a593Smuzhiyun #ifdef CUSTOM_SET_ANTNPM
1695*4882a593Smuzhiyun 	uint32 mimo_ant_set;
1696*4882a593Smuzhiyun #endif /* CUSTOM_SET_ANTNPM */
1697*4882a593Smuzhiyun #ifdef CUSTOM_SET_OCLOFF
1698*4882a593Smuzhiyun 	bool ocl_off;
1699*4882a593Smuzhiyun #endif /* CUSTOM_SET_OCLOFF */
1700*4882a593Smuzhiyun #ifdef DHD_DEBUG
1701*4882a593Smuzhiyun 	/* memwaste feature */
1702*4882a593Smuzhiyun 	dll_t mw_list_head; /* memwaste list head */
1703*4882a593Smuzhiyun 	uint32 mw_id; /* memwaste list unique id */
1704*4882a593Smuzhiyun #endif /* DHD_DEBUG */
1705*4882a593Smuzhiyun #ifdef WLTDLS
1706*4882a593Smuzhiyun 	spinlock_t tdls_lock;
1707*4882a593Smuzhiyun #endif /* WLTDLS */
1708*4882a593Smuzhiyun 	uint pcie_txs_metadata_enable;
1709*4882a593Smuzhiyun #ifdef BTLOG
1710*4882a593Smuzhiyun 	bool bt_logging;
1711*4882a593Smuzhiyun 	bool submit_count_WAR;	/* submission count WAR */
1712*4882a593Smuzhiyun 	bool bt_logging_enabled;
1713*4882a593Smuzhiyun #endif	/* BTLOG */
1714*4882a593Smuzhiyun 	uint wbtext_policy;	/* wbtext policy of dongle */
1715*4882a593Smuzhiyun 	bool wbtext_support;	/* for product policy only */
1716*4882a593Smuzhiyun #ifdef PCIE_OOB
1717*4882a593Smuzhiyun 	bool	d2h_no_oob_dw;
1718*4882a593Smuzhiyun #endif /* PCIE_OOB */
1719*4882a593Smuzhiyun #ifdef PCIE_INB_DW
1720*4882a593Smuzhiyun 	bool	d2h_inband_dw;
1721*4882a593Smuzhiyun 	enum dhd_bus_ds_state	ds_state;
1722*4882a593Smuzhiyun #endif /* PCIE_INB_DW */
1723*4882a593Smuzhiyun 	bool max_dtim_enable;	/* use MAX bcn_li_dtim value in suspend mode */
1724*4882a593Smuzhiyun #ifdef SNAPSHOT_UPLOAD
1725*4882a593Smuzhiyun 	bool snapshot_upload;
1726*4882a593Smuzhiyun #endif /* SNAPSHOT_UPLOAD */
1727*4882a593Smuzhiyun 	tput_test_t tput_data;
1728*4882a593Smuzhiyun 	uint64 tput_start_ts;
1729*4882a593Smuzhiyun 	uint64 tput_stop_ts;
1730*4882a593Smuzhiyun 	uint dhd_watchdog_ms_backup;
1731*4882a593Smuzhiyun 	bool wl_event_enabled;
1732*4882a593Smuzhiyun 	bool logtrace_pkt_sendup;
1733*4882a593Smuzhiyun #ifdef GDB_PROXY
1734*4882a593Smuzhiyun 	/* True if firmware runs under gdb control (this may cause timeouts at any point) */
1735*4882a593Smuzhiyun 	bool gdb_proxy_active;
1736*4882a593Smuzhiyun 	/* True if deadman_to shall be forced to 0 */
1737*4882a593Smuzhiyun 	bool gdb_proxy_nodeadman;
1738*4882a593Smuzhiyun 	/* Counter incremented at each firmware stop/go transition. LSB (GDB_PROXY_STOP_MASK)
1739*4882a593Smuzhiyun 	 * is set when firmware is stopped, clear when running
1740*4882a593Smuzhiyun 	 */
1741*4882a593Smuzhiyun 	uint32 gdb_proxy_stop_count;
1742*4882a593Smuzhiyun #endif /* GDB_PROXY */
1743*4882a593Smuzhiyun 	int debug_dump_subcmd;
1744*4882a593Smuzhiyun 	uint64 debug_dump_time_sec;
1745*4882a593Smuzhiyun 	bool hscb_enable;
1746*4882a593Smuzhiyun #if defined(DHD_AWDL)
1747*4882a593Smuzhiyun #if defined(AWDL_SLOT_STATS)
1748*4882a593Smuzhiyun 	dhd_awdl_stats_t awdl_stats[AWDL_NUM_SLOTS];
1749*4882a593Smuzhiyun 	uint8 awdl_tx_status_slot; /* Slot in which AWDL is active right now */
1750*4882a593Smuzhiyun 	void *awdl_stats_lock; /* Lock to protect against parallel AWDL stats updates */
1751*4882a593Smuzhiyun 	uint16 awdl_aw_counter;
1752*4882a593Smuzhiyun 	uint32 pkt_latency;
1753*4882a593Smuzhiyun #endif /* AWDL_SLOT_STATS */
1754*4882a593Smuzhiyun 	uint32 awdl_ifidx;
1755*4882a593Smuzhiyun 	uint16 awdl_seq;
1756*4882a593Smuzhiyun 	uint8 awdl_minext;
1757*4882a593Smuzhiyun 	uint8 awdl_presmode;
1758*4882a593Smuzhiyun 	bool awdl_llc_enabled;
1759*4882a593Smuzhiyun #endif /* DHD_AWDL */
1760*4882a593Smuzhiyun 	uint32 logset_prsrv_mask;
1761*4882a593Smuzhiyun #ifdef DHD_PKT_LOGGING
1762*4882a593Smuzhiyun 	struct dhd_pktlog *pktlog;
1763*4882a593Smuzhiyun 	char debug_dump_time_pktlog_str[DEBUG_DUMP_TIME_BUF_LEN];
1764*4882a593Smuzhiyun 	bool pktlog_debug;
1765*4882a593Smuzhiyun #endif /* DHD_PKT_LOGGING */
1766*4882a593Smuzhiyun #ifdef EWP_EDL
1767*4882a593Smuzhiyun 	bool dongle_edl_support;
1768*4882a593Smuzhiyun 	dhd_dma_buf_t edl_ring_mem;
1769*4882a593Smuzhiyun #endif /* EWP_EDL */
1770*4882a593Smuzhiyun #if defined (LINUX) || defined(linux)
1771*4882a593Smuzhiyun 	struct mutex ndev_op_sync;
1772*4882a593Smuzhiyun #endif /* defined (LINUX) || defined(linux) */
1773*4882a593Smuzhiyun 	bool debug_buf_dest_support;
1774*4882a593Smuzhiyun 	uint32 debug_buf_dest_stat[DEBUG_BUF_DEST_MAX];
1775*4882a593Smuzhiyun #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
1776*4882a593Smuzhiyun 	char *hang_info;
1777*4882a593Smuzhiyun 	int hang_info_cnt;
1778*4882a593Smuzhiyun 	char debug_dump_time_hang_str[DEBUG_DUMP_TIME_BUF_LEN];
1779*4882a593Smuzhiyun #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
1780*4882a593Smuzhiyun 	char debug_dump_time_str[DEBUG_DUMP_TIME_BUF_LEN];
1781*4882a593Smuzhiyun 	void *event_log_filter;
1782*4882a593Smuzhiyun 	uint tput_test_done;
1783*4882a593Smuzhiyun #if defined(LINUX) || defined(linux)
1784*4882a593Smuzhiyun 	wait_queue_head_t tx_tput_test_wait;
1785*4882a593Smuzhiyun 	wait_queue_head_t tx_completion_wait;
1786*4882a593Smuzhiyun #ifdef WL_NANHO
1787*4882a593Smuzhiyun 	void *nanhoi; /* NANHO instance */
1788*4882a593Smuzhiyun #endif /* WL_NANHO */
1789*4882a593Smuzhiyun #endif /* defined(LINUX) || defined(linux) */
1790*4882a593Smuzhiyun #ifdef DHD_ERPOM
1791*4882a593Smuzhiyun 	bool enable_erpom;
1792*4882a593Smuzhiyun 	pom_func_handler_t pom_wlan_handler;
1793*4882a593Smuzhiyun 	int (*pom_func_register)(pom_func_handler_t *func);
1794*4882a593Smuzhiyun 	int (*pom_func_deregister)(pom_func_handler_t *func);
1795*4882a593Smuzhiyun 	int (*pom_toggle_reg_on)(uchar func_id, uchar reason);
1796*4882a593Smuzhiyun #endif /* DHD_ERPOM */
1797*4882a593Smuzhiyun #if defined(DHD_H2D_LOG_TIME_SYNC)
1798*4882a593Smuzhiyun #define DHD_H2D_LOG_TIME_STAMP_MATCH	(10000) /* 10 Seconds */
1799*4882a593Smuzhiyun 	/*
1800*4882a593Smuzhiyun 	 * Interval for updating the dongle console message time stamp with the Host (DHD)
1801*4882a593Smuzhiyun 	 * time stamp
1802*4882a593Smuzhiyun 	 */
1803*4882a593Smuzhiyun 	uint32 dhd_rte_time_sync_ms;
1804*4882a593Smuzhiyun #endif /* DHD_H2D_LOG_TIME_SYNC */
1805*4882a593Smuzhiyun 	uint32 batch_tx_pkts_cmpl;
1806*4882a593Smuzhiyun 	uint32 batch_tx_num_pkts;
1807*4882a593Smuzhiyun #ifdef DHD_EFI
1808*4882a593Smuzhiyun 	bool insert_random_mac;
1809*4882a593Smuzhiyun 	/* threshold # of pkts Tx'd/Rx'd after which efi dhd
1810*4882a593Smuzhiyun 	 * will switch intr poll period to 100us
1811*4882a593Smuzhiyun 	*/
1812*4882a593Smuzhiyun 	uint64 npkts_thresh;
1813*4882a593Smuzhiyun 	/* the period of time in which if no pkt is Tx'd/Rx'd
1814*4882a593Smuzhiyun 	 * efi dhd will restore intr poll period to default value
1815*4882a593Smuzhiyun 	*/
1816*4882a593Smuzhiyun 	uint64 pkt_intvl_thresh_us;
1817*4882a593Smuzhiyun 	/* time stamp of last Tx'd pkt */
1818*4882a593Smuzhiyun 	uint64 tx_last_pkt_ts;
1819*4882a593Smuzhiyun 	/* time stamp of last Rx'd pkt */
1820*4882a593Smuzhiyun 	uint64 rx_last_pkt_ts;
1821*4882a593Smuzhiyun 	/* used to temporarily store the current intr poll period
1822*4882a593Smuzhiyun 	 * during efi dhd iovar execution, so as to restore it back
1823*4882a593Smuzhiyun 	 * once iovar completes
1824*4882a593Smuzhiyun 	*/
1825*4882a593Smuzhiyun 	uint32 cur_intr_poll_period;
1826*4882a593Smuzhiyun 	/* the intr poll period set by user through dhd iovar */
1827*4882a593Smuzhiyun 	uint32 iovar_intr_poll_period;
1828*4882a593Smuzhiyun 	bool pcie_readshared_done;
1829*4882a593Smuzhiyun #endif /* DHD_EFI */
1830*4882a593Smuzhiyun #ifdef DHD_DUMP_MNGR
1831*4882a593Smuzhiyun 	struct _dhd_dump_file_manage *dump_file_manage;
1832*4882a593Smuzhiyun #endif /* DHD_DUMP_MNGR */
1833*4882a593Smuzhiyun #ifdef BCMINTERNAL
1834*4882a593Smuzhiyun #ifdef DHD_FWTRACE
1835*4882a593Smuzhiyun 	fwtrace_info_t *fwtrace_info; /* f/w trace information */
1836*4882a593Smuzhiyun #endif	/* DHD_FWTRACE */
1837*4882a593Smuzhiyun #endif	/* BCMINTERNAL */
1838*4882a593Smuzhiyun 	bool event_log_max_sets_queried;
1839*4882a593Smuzhiyun 	uint32 event_log_max_sets;
1840*4882a593Smuzhiyun #ifdef DHD_STATUS_LOGGING
1841*4882a593Smuzhiyun 	void *statlog;
1842*4882a593Smuzhiyun #endif /* DHD_STATUS_LOGGING */
1843*4882a593Smuzhiyun #ifdef DHD_HP2P
1844*4882a593Smuzhiyun 	/* whether enabled from host by user iovar */
1845*4882a593Smuzhiyun 	bool hp2p_enable;
1846*4882a593Smuzhiyun 	bool hp2p_infra_enable;
1847*4882a593Smuzhiyun 	/* whether fw supports it */
1848*4882a593Smuzhiyun 	bool hp2p_capable;
1849*4882a593Smuzhiyun 	bool hp2p_mf_enable;
1850*4882a593Smuzhiyun 	bool hp2p_ts_capable;
1851*4882a593Smuzhiyun 	uint16 pkt_thresh;
1852*4882a593Smuzhiyun 	uint16 time_thresh;
1853*4882a593Smuzhiyun 	uint16 pkt_expiry;
1854*4882a593Smuzhiyun 	hp2p_info_t hp2p_info[MAX_HP2P_FLOWS];
1855*4882a593Smuzhiyun 	/* Flag to allow more hp2p ring creation */
1856*4882a593Smuzhiyun 	bool hp2p_ring_more;
1857*4882a593Smuzhiyun #endif /* DHD_HP2P */
1858*4882a593Smuzhiyun #ifdef DHD_DB0TS
1859*4882a593Smuzhiyun 	bool db0ts_capable;
1860*4882a593Smuzhiyun #endif /* DHD_DB0TS */
1861*4882a593Smuzhiyun 	bool extdtxs_in_txcpl;
1862*4882a593Smuzhiyun 	bool hostrdy_after_init;
1863*4882a593Smuzhiyun 	uint16 dhd_induce_error;
1864*4882a593Smuzhiyun 	uint16 dhd_induce_bh_error;
1865*4882a593Smuzhiyun 	int wlc_ver_major;
1866*4882a593Smuzhiyun 	int wlc_ver_minor;
1867*4882a593Smuzhiyun #ifdef DHD_PKTTS
1868*4882a593Smuzhiyun 	/* stores the packet meta data buffer length queried via iovar */
1869*4882a593Smuzhiyun 	uint16 pkt_metadata_version;
1870*4882a593Smuzhiyun 	uint16 pkt_metadata_buflen;
1871*4882a593Smuzhiyun #endif
1872*4882a593Smuzhiyun #ifdef SUPPORT_SET_TID
1873*4882a593Smuzhiyun 	uint8 tid_mode;
1874*4882a593Smuzhiyun 	uint32 target_uid;
1875*4882a593Smuzhiyun 	uint8 target_tid;
1876*4882a593Smuzhiyun #endif /* SUPPORT_SET_TID */
1877*4882a593Smuzhiyun #ifdef CONFIG_SILENT_ROAM
1878*4882a593Smuzhiyun 	bool sroam_turn_on;	/* Silent roam monitor enable flags */
1879*4882a593Smuzhiyun 	bool sroamed;		/* Silent roam monitor check flags */
1880*4882a593Smuzhiyun #endif /* CONFIG_SILENT_ROAM */
1881*4882a593Smuzhiyun #ifdef DHD_PKTDUMP_ROAM
1882*4882a593Smuzhiyun 	void *pktcnts;
1883*4882a593Smuzhiyun #endif /* DHD_PKTDUMP_ROAM */
1884*4882a593Smuzhiyun 	dhd_db7_info_t db7_trap;
1885*4882a593Smuzhiyun 	bool fw_preinit;
1886*4882a593Smuzhiyun 	bool ring_attached;
1887*4882a593Smuzhiyun #ifdef DHD_PCIE_RUNTIMEPM
1888*4882a593Smuzhiyun 	bool rx_pending_due_to_rpm;
1889*4882a593Smuzhiyun #endif /* DHD_PCIE_RUNTIMEPM */
1890*4882a593Smuzhiyun 	bool disable_dtim_in_suspend;	/* Disable set bcn_li_dtim in suspend */
1891*4882a593Smuzhiyun 	union {
1892*4882a593Smuzhiyun 		wl_roam_stats_v1_t v1;
1893*4882a593Smuzhiyun 	} roam_evt;
1894*4882a593Smuzhiyun 	bool arpoe_enable;
1895*4882a593Smuzhiyun 	bool arpol_configured;
1896*4882a593Smuzhiyun #ifdef DHD_TX_PROFILE
1897*4882a593Smuzhiyun 	bool tx_profile_enab;
1898*4882a593Smuzhiyun 	uint8 num_profiles;
1899*4882a593Smuzhiyun 	dhd_tx_profile_protocol_t *protocol_filters;
1900*4882a593Smuzhiyun #endif /* defined(DHD_TX_PROFILE) */
1901*4882a593Smuzhiyun #ifdef DHD_MEM_STATS
1902*4882a593Smuzhiyun 	void *mem_stats_lock;
1903*4882a593Smuzhiyun 	uint64 txpath_mem;
1904*4882a593Smuzhiyun 	uint64 rxpath_mem;
1905*4882a593Smuzhiyun #endif /* DHD_MEM_STATS */
1906*4882a593Smuzhiyun #ifdef DHD_LB_RXP
1907*4882a593Smuzhiyun 	atomic_t lb_rxp_flow_ctrl;
1908*4882a593Smuzhiyun 	uint32 lb_rxp_stop_thr;
1909*4882a593Smuzhiyun 	uint32 lb_rxp_strt_thr;
1910*4882a593Smuzhiyun #endif /* DHD_LB_RXP */
1911*4882a593Smuzhiyun #ifdef DHD_LB_STATS
1912*4882a593Smuzhiyun 	uint64 lb_rxp_stop_thr_hitcnt;
1913*4882a593Smuzhiyun 	uint64 lb_rxp_strt_thr_hitcnt;
1914*4882a593Smuzhiyun 	uint64 lb_rxp_napi_sched_cnt;
1915*4882a593Smuzhiyun 	uint64 lb_rxp_napi_complete_cnt;
1916*4882a593Smuzhiyun #endif /* DHD_LB_STATS */
1917*4882a593Smuzhiyun 	bool check_trap_rot;
1918*4882a593Smuzhiyun 	/* if FW supports host insertion of SFH LLC */
1919*4882a593Smuzhiyun 	bool host_sfhllc_supported;
1920*4882a593Smuzhiyun #ifdef DHD_GRO_ENABLE_HOST_CTRL
1921*4882a593Smuzhiyun 	bool permitted_gro;
1922*4882a593Smuzhiyun #endif /* DHD_GRO_ENABLE_HOST_CTRL */
1923*4882a593Smuzhiyun #ifdef CSI_SUPPORT
1924*4882a593Smuzhiyun 	struct list_head csi_list;
1925*4882a593Smuzhiyun 	int csi_count;
1926*4882a593Smuzhiyun #endif /* CSI_SUPPORT */
1927*4882a593Smuzhiyun 	char *clm_path;		/* module_param: path to clm vars file */
1928*4882a593Smuzhiyun 	char *conf_path;		/* module_param: path to config vars file */
1929*4882a593Smuzhiyun 	struct dhd_conf *conf;	/* Config module handle */
1930*4882a593Smuzhiyun 	void *adapter;			/* adapter information, interrupt, fw path etc. */
1931*4882a593Smuzhiyun 	void *event_params;
1932*4882a593Smuzhiyun #ifdef WL_TIMER
1933*4882a593Smuzhiyun 	void *timer_params;
1934*4882a593Smuzhiyun #endif /* WL_TIMER */
1935*4882a593Smuzhiyun #ifdef BCMDBUS
1936*4882a593Smuzhiyun 	bool dhd_remove;
1937*4882a593Smuzhiyun #endif /* BCMDBUS */
1938*4882a593Smuzhiyun #ifdef WL_ESCAN
1939*4882a593Smuzhiyun 	struct wl_escan_info *escan;
1940*4882a593Smuzhiyun #endif
1941*4882a593Smuzhiyun #if defined(WL_WIRELESS_EXT)
1942*4882a593Smuzhiyun 	void *wext_info;
1943*4882a593Smuzhiyun #endif
1944*4882a593Smuzhiyun #ifdef WL_EXT_IAPSTA
1945*4882a593Smuzhiyun 	void *iapsta_params;
1946*4882a593Smuzhiyun #endif
1947*4882a593Smuzhiyun 	int hostsleep;
1948*4882a593Smuzhiyun #ifdef SENDPROB
1949*4882a593Smuzhiyun 	bool recv_probereq;
1950*4882a593Smuzhiyun #endif
1951*4882a593Smuzhiyun #ifdef DHD_NOTIFY_MAC_CHANGED
1952*4882a593Smuzhiyun 	bool skip_dhd_stop;
1953*4882a593Smuzhiyun #endif /* DHD_NOTIFY_MAC_CHANGED */
1954*4882a593Smuzhiyun #ifdef WL_EXT_GENL
1955*4882a593Smuzhiyun 	void *zconf;
1956*4882a593Smuzhiyun #endif
1957*4882a593Smuzhiyun } dhd_pub_t;
1958*4882a593Smuzhiyun 
1959*4882a593Smuzhiyun #if defined(__linux__)
1960*4882a593Smuzhiyun int dhd_wifi_platform_set_power(dhd_pub_t *pub, bool on);
1961*4882a593Smuzhiyun #else
1962*4882a593Smuzhiyun static INLINE int dhd_wifi_platform_set_power(dhd_pub_t *pub, bool on) { return 0; }
1963*4882a593Smuzhiyun #endif /* __linux__ */
1964*4882a593Smuzhiyun 
1965*4882a593Smuzhiyun typedef struct {
1966*4882a593Smuzhiyun 	uint rxwake;
1967*4882a593Smuzhiyun 	uint rcwake;
1968*4882a593Smuzhiyun #ifdef DHD_WAKE_RX_STATUS
1969*4882a593Smuzhiyun 	uint rx_bcast;
1970*4882a593Smuzhiyun 	uint rx_arp;
1971*4882a593Smuzhiyun 	uint rx_mcast;
1972*4882a593Smuzhiyun 	uint rx_multi_ipv6;
1973*4882a593Smuzhiyun 	uint rx_icmpv6;
1974*4882a593Smuzhiyun 	uint rx_icmpv6_ra;
1975*4882a593Smuzhiyun 	uint rx_icmpv6_na;
1976*4882a593Smuzhiyun 	uint rx_icmpv6_ns;
1977*4882a593Smuzhiyun 	uint rx_multi_ipv4;
1978*4882a593Smuzhiyun 	uint rx_multi_other;
1979*4882a593Smuzhiyun 	uint rx_ucast;
1980*4882a593Smuzhiyun #endif /* DHD_WAKE_RX_STATUS */
1981*4882a593Smuzhiyun #ifdef DHD_WAKE_EVENT_STATUS
1982*4882a593Smuzhiyun 	uint rc_event[WLC_E_LAST];
1983*4882a593Smuzhiyun #endif /* DHD_WAKE_EVENT_STATUS */
1984*4882a593Smuzhiyun } wake_counts_t;
1985*4882a593Smuzhiyun 
1986*4882a593Smuzhiyun #if defined(PCIE_FULL_DONGLE)
1987*4882a593Smuzhiyun /*
1988*4882a593Smuzhiyun  * XXX: WARNING: dhd_wlfc.h also defines a dhd_pkttag_t
1989*4882a593Smuzhiyun  * making wlfc incompatible with PCIE_FULL_DONGLE
1990*4882a593Smuzhiyun  */
1991*4882a593Smuzhiyun 
1992*4882a593Smuzhiyun /* Packet Tag for PCIE Full Dongle DHD */
1993*4882a593Smuzhiyun typedef struct dhd_pkttag_fd {
1994*4882a593Smuzhiyun 	uint16    flowid;   /* Flowring Id */
1995*4882a593Smuzhiyun 	uint16    ifid;
1996*4882a593Smuzhiyun #ifdef DHD_SBN
1997*4882a593Smuzhiyun 	uint8	  pkt_udr;
1998*4882a593Smuzhiyun 	uint8	  pad;
1999*4882a593Smuzhiyun #endif /* DHD_SBN */
2000*4882a593Smuzhiyun #if defined(BCM_ROUTER_DHD) && defined(BCM_GMAC3)
2001*4882a593Smuzhiyun 	uint16    dataoff;  /* start of packet */
2002*4882a593Smuzhiyun #endif /* BCM_ROUTER_DHD && BCM_GMAC3 */
2003*4882a593Smuzhiyun #ifndef DHD_PCIE_PKTID
2004*4882a593Smuzhiyun 	uint16    dma_len;  /* pkt len for DMA_MAP/UNMAP */
2005*4882a593Smuzhiyun 	dmaaddr_t pa;       /* physical address */
2006*4882a593Smuzhiyun 	void      *dmah;    /* dma mapper handle */
2007*4882a593Smuzhiyun 	void      *secdma; /* secure dma sec_cma_info handle */
2008*4882a593Smuzhiyun #endif /* !DHD_PCIE_PKTID */
2009*4882a593Smuzhiyun #if defined(TX_STATUS_LATENCY_STATS) || defined(DHD_PKTTS)
2010*4882a593Smuzhiyun 	uint64	   q_time_us; /* time when tx pkt queued to flowring */
2011*4882a593Smuzhiyun #endif /* TX_STATUS_LATENCY_STATS || DHD_PKTTS */
2012*4882a593Smuzhiyun } dhd_pkttag_fd_t;
2013*4882a593Smuzhiyun 
2014*4882a593Smuzhiyun /* Packet Tag for DHD PCIE Full Dongle */
2015*4882a593Smuzhiyun #define DHD_PKTTAG_FD(pkt)          ((dhd_pkttag_fd_t *)(PKTTAG(pkt)))
2016*4882a593Smuzhiyun 
2017*4882a593Smuzhiyun #define DHD_PKT_GET_FLOWID(pkt)     ((DHD_PKTTAG_FD(pkt))->flowid)
2018*4882a593Smuzhiyun #define DHD_PKT_SET_FLOWID(pkt, pkt_flowid) \
2019*4882a593Smuzhiyun 	DHD_PKTTAG_FD(pkt)->flowid = (uint16)(pkt_flowid)
2020*4882a593Smuzhiyun 
2021*4882a593Smuzhiyun #define DHD_PKT_GET_DATAOFF(pkt)    ((DHD_PKTTAG_FD(pkt))->dataoff)
2022*4882a593Smuzhiyun #define DHD_PKT_SET_DATAOFF(pkt, pkt_dataoff) \
2023*4882a593Smuzhiyun 	DHD_PKTTAG_FD(pkt)->dataoff = (uint16)(pkt_dataoff)
2024*4882a593Smuzhiyun 
2025*4882a593Smuzhiyun #define DHD_PKT_GET_DMA_LEN(pkt)    ((DHD_PKTTAG_FD(pkt))->dma_len)
2026*4882a593Smuzhiyun #define DHD_PKT_SET_DMA_LEN(pkt, pkt_dma_len) \
2027*4882a593Smuzhiyun 	DHD_PKTTAG_FD(pkt)->dma_len = (uint16)(pkt_dma_len)
2028*4882a593Smuzhiyun 
2029*4882a593Smuzhiyun #define DHD_PKT_GET_PA(pkt)         ((DHD_PKTTAG_FD(pkt))->pa)
2030*4882a593Smuzhiyun #define DHD_PKT_SET_PA(pkt, pkt_pa) \
2031*4882a593Smuzhiyun 	DHD_PKTTAG_FD(pkt)->pa = (dmaaddr_t)(pkt_pa)
2032*4882a593Smuzhiyun 
2033*4882a593Smuzhiyun #define DHD_PKT_GET_DMAH(pkt)       ((DHD_PKTTAG_FD(pkt))->dmah)
2034*4882a593Smuzhiyun #define DHD_PKT_SET_DMAH(pkt, pkt_dmah) \
2035*4882a593Smuzhiyun 	DHD_PKTTAG_FD(pkt)->dmah = (void *)(pkt_dmah)
2036*4882a593Smuzhiyun 
2037*4882a593Smuzhiyun #define DHD_PKT_GET_SECDMA(pkt)    ((DHD_PKTTAG_FD(pkt))->secdma)
2038*4882a593Smuzhiyun #define DHD_PKT_SET_SECDMA(pkt, pkt_secdma) \
2039*4882a593Smuzhiyun 	DHD_PKTTAG_FD(pkt)->secdma = (void *)(pkt_secdma)
2040*4882a593Smuzhiyun 
2041*4882a593Smuzhiyun #if defined(TX_STATUS_LATENCY_STATS) || defined(DHD_PKTTS)
2042*4882a593Smuzhiyun #define DHD_PKT_GET_QTIME(pkt)    ((DHD_PKTTAG_FD(pkt))->q_time_us)
2043*4882a593Smuzhiyun #define DHD_PKT_SET_QTIME(pkt, pkt_q_time_us) \
2044*4882a593Smuzhiyun 	DHD_PKTTAG_FD(pkt)->q_time_us = (uint64)(pkt_q_time_us)
2045*4882a593Smuzhiyun #endif /* TX_STATUS_LATENCY_STATS || DHD_PKTTS */
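/*
 * Illustrative sketch (not taken from the driver sources): the accessor macros
 * above are typically used by the flowring/bus code to stamp a transmit packet
 * with its flowring id before it is handed to the message buffer layer. "pkt"
 * is any packet handle whose PKTTAG() area holds a dhd_pkttag_fd_t, and the
 * flowid value is assumed to come from a flowring lookup:
 *
 *	void *pkt;            // packet obtained from the OSL packet pool
 *	uint16 flowid = 5;    // hypothetical flowring id from the flowring lookup
 *
 *	DHD_PKT_SET_FLOWID(pkt, flowid);
 *	...
 *	ASSERT(DHD_PKT_GET_FLOWID(pkt) == flowid);
 */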
2046*4882a593Smuzhiyun #endif /* PCIE_FULL_DONGLE */
2047*4882a593Smuzhiyun 
2048*4882a593Smuzhiyun #if defined(BCMWDF)
2049*4882a593Smuzhiyun typedef struct {
2050*4882a593Smuzhiyun 	dhd_pub_t *dhd_pub;
2051*4882a593Smuzhiyun } dhd_workitem_context_t;
2052*4882a593Smuzhiyun 
2053*4882a593Smuzhiyun WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(dhd_workitem_context_t, dhd_get_dhd_workitem_context)
2054*4882a593Smuzhiyun #endif /* BCMWDF */
2055*4882a593Smuzhiyun 
2056*4882a593Smuzhiyun #if defined(LINUX) || defined(linux)
2057*4882a593Smuzhiyun #if defined(CONFIG_PM_SLEEP)
2058*4882a593Smuzhiyun 
2059*4882a593Smuzhiyun 	#define DHD_PM_RESUME_WAIT_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
2060*4882a593Smuzhiyun 	#define _DHD_PM_RESUME_WAIT(a, b) do {\
2061*4882a593Smuzhiyun 			int retry = 0; \
2062*4882a593Smuzhiyun 			SMP_RD_BARRIER_DEPENDS(); \
2063*4882a593Smuzhiyun 			while (dhd_mmc_suspend && retry++ != b) { \
2064*4882a593Smuzhiyun 				SMP_RD_BARRIER_DEPENDS(); \
2065*4882a593Smuzhiyun 				wait_event_interruptible_timeout(a, !dhd_mmc_suspend, 1); \
2066*4882a593Smuzhiyun 			} \
2067*4882a593Smuzhiyun 		} while (0)
2068*4882a593Smuzhiyun 	#define DHD_PM_RESUME_WAIT(a) 		_DHD_PM_RESUME_WAIT(a, 200)
2069*4882a593Smuzhiyun 	#define DHD_PM_RESUME_WAIT_FOREVER(a) 	_DHD_PM_RESUME_WAIT(a, ~0)
2070*4882a593Smuzhiyun 	#define DHD_PM_RESUME_RETURN_ERROR(a)   do { \
2071*4882a593Smuzhiyun 			if (dhd_mmc_suspend) { \
2072*4882a593Smuzhiyun 				printf("%s[%d]: mmc is still in suspend state!!!\n", \
2073*4882a593Smuzhiyun 					__FUNCTION__, __LINE__); \
2074*4882a593Smuzhiyun 				return a; \
2075*4882a593Smuzhiyun 			} \
2076*4882a593Smuzhiyun 		} while (0)
2077*4882a593Smuzhiyun 	#define DHD_PM_RESUME_RETURN		do { if (dhd_mmc_suspend) return; } while (0)
2078*4882a593Smuzhiyun 
2079*4882a593Smuzhiyun 	#define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
2080*4882a593Smuzhiyun 	#define SPINWAIT_SLEEP(a, exp, us) do { \
2081*4882a593Smuzhiyun 		uint countdown = (us) + 9999; \
2082*4882a593Smuzhiyun 		while ((exp) && (countdown >= 10000)) { \
2083*4882a593Smuzhiyun 			wait_event_interruptible_timeout(a, FALSE, 1); \
2084*4882a593Smuzhiyun 			countdown -= 10000; \
2085*4882a593Smuzhiyun 		} \
2086*4882a593Smuzhiyun 	} while (0)
2087*4882a593Smuzhiyun 
2088*4882a593Smuzhiyun #else
2089*4882a593Smuzhiyun 
2090*4882a593Smuzhiyun 	#define DHD_PM_RESUME_WAIT_INIT(a)
2091*4882a593Smuzhiyun 	#define DHD_PM_RESUME_WAIT(a)
2092*4882a593Smuzhiyun 	#define DHD_PM_RESUME_WAIT_FOREVER(a)
2093*4882a593Smuzhiyun 	#define DHD_PM_RESUME_RETURN_ERROR(a)
2094*4882a593Smuzhiyun 	#define DHD_PM_RESUME_RETURN
2095*4882a593Smuzhiyun 
2096*4882a593Smuzhiyun 	#define DHD_SPINWAIT_SLEEP_INIT(a)
2097*4882a593Smuzhiyun 	#define SPINWAIT_SLEEP(a, exp, us)  do { \
2098*4882a593Smuzhiyun 		uint countdown = (us) + 9; \
2099*4882a593Smuzhiyun 		while ((exp) && (countdown >= 10)) { \
2100*4882a593Smuzhiyun 			OSL_DELAY(10);  \
2101*4882a593Smuzhiyun 			countdown -= 10;  \
2102*4882a593Smuzhiyun 		} \
2103*4882a593Smuzhiyun 	} while (0)
2104*4882a593Smuzhiyun 
2105*4882a593Smuzhiyun #endif /* CONFIG_PM_SLEEP */
2106*4882a593Smuzhiyun #else
2107*4882a593Smuzhiyun 	#define DHD_SPINWAIT_SLEEP_INIT(a)
2108*4882a593Smuzhiyun 	#define SPINWAIT_SLEEP(a, exp, us)  do { \
2109*4882a593Smuzhiyun 		uint countdown = (us) + 9; \
2110*4882a593Smuzhiyun 		while ((exp) && (countdown >= 10)) { \
2111*4882a593Smuzhiyun 			OSL_DELAY(10);  \
2112*4882a593Smuzhiyun 			countdown -= 10;  \
2113*4882a593Smuzhiyun 		} \
2114*4882a593Smuzhiyun 	} while (0)
2115*4882a593Smuzhiyun #endif /* defined (LINUX) || defined(linux) */
2116*4882a593Smuzhiyun 
2117*4882a593Smuzhiyun #define DHD_IF_VIF	0x01	/* Virtual IF (Hidden from user) */
2118*4882a593Smuzhiyun 
2119*4882a593Smuzhiyun #ifdef PNO_SUPPORT
2120*4882a593Smuzhiyun int dhd_pno_clean(dhd_pub_t *dhd);
2121*4882a593Smuzhiyun #endif /* PNO_SUPPORT */
2122*4882a593Smuzhiyun 
2123*4882a593Smuzhiyun /*
2124*4882a593Smuzhiyun  *  Wake locks are an Android power management concept. They are used by applications and services
2125*4882a593Smuzhiyun  *  to request CPU resources.
2126*4882a593Smuzhiyun  */
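/*
 * Minimal usage sketch (an assumption for illustration, not lifted from the
 * driver sources): the DHD_OS_WAKE_LOCK()/DHD_OS_WAKE_UNLOCK() macros defined
 * further below are meant to be used in balanced pairs around work that must
 * not be interrupted by a host suspend, e.g. in an ioctl or rx path:
 *
 *	dhd_pub_t *dhdp;                  // driver public context
 *	DHD_OS_WAKE_LOCK(dhdp);
 *	ret = dhd_do_some_work(dhdp);     // hypothetical helper
 *	DHD_OS_WAKE_UNLOCK(dhdp);
 */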
2127*4882a593Smuzhiyun #if defined(linux) && defined(OEM_ANDROID)
2128*4882a593Smuzhiyun extern int dhd_os_wake_lock(dhd_pub_t *pub);
2129*4882a593Smuzhiyun extern int dhd_os_wake_unlock(dhd_pub_t *pub);
2130*4882a593Smuzhiyun extern int dhd_os_wake_lock_waive(dhd_pub_t *pub);
2131*4882a593Smuzhiyun extern int dhd_os_wake_lock_restore(dhd_pub_t *pub);
2132*4882a593Smuzhiyun extern void dhd_event_wake_lock(dhd_pub_t *pub);
2133*4882a593Smuzhiyun extern void dhd_event_wake_unlock(dhd_pub_t *pub);
2134*4882a593Smuzhiyun extern void dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val);
2135*4882a593Smuzhiyun extern void dhd_pm_wake_unlock(dhd_pub_t *pub);
2136*4882a593Smuzhiyun extern void dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val);
2137*4882a593Smuzhiyun extern void dhd_txfl_wake_unlock(dhd_pub_t *pub);
2138*4882a593Smuzhiyun extern void dhd_nan_wake_lock_timeout(dhd_pub_t *pub, int val);
2139*4882a593Smuzhiyun extern void dhd_nan_wake_unlock(dhd_pub_t *pub);
2140*4882a593Smuzhiyun extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub);
2141*4882a593Smuzhiyun extern int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val);
2142*4882a593Smuzhiyun extern int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val);
2143*4882a593Smuzhiyun extern int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub);
2144*4882a593Smuzhiyun extern int dhd_os_wd_wake_lock(dhd_pub_t *pub);
2145*4882a593Smuzhiyun extern int dhd_os_wd_wake_unlock(dhd_pub_t *pub);
2146*4882a593Smuzhiyun extern void dhd_os_wake_lock_init(struct dhd_info *dhd);
2147*4882a593Smuzhiyun extern void dhd_os_wake_lock_destroy(struct dhd_info *dhd);
2148*4882a593Smuzhiyun #ifdef DHD_USE_SCAN_WAKELOCK
2149*4882a593Smuzhiyun extern void dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val);
2150*4882a593Smuzhiyun extern void dhd_os_scan_wake_unlock(dhd_pub_t *pub);
2151*4882a593Smuzhiyun #endif /* DHD_USE_SCAN_WAKELOCK */
2152*4882a593Smuzhiyun 
2153*4882a593Smuzhiyun #ifdef WLEASYMESH
2154*4882a593Smuzhiyun extern int dhd_get_1905_almac(dhd_pub_t *dhdp, uint8 ifidx, uint8* ea, bool mcast);
2155*4882a593Smuzhiyun extern int dhd_set_1905_almac(dhd_pub_t *dhdp, uint8 ifidx, uint8* ea, bool mcast);
2156*4882a593Smuzhiyun #endif /* WLEASYMESH */
2157*4882a593Smuzhiyun 
2158*4882a593Smuzhiyun inline static void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp)
2159*4882a593Smuzhiyun {
2160*4882a593Smuzhiyun #if defined(OEM_ANDROID)
2161*4882a593Smuzhiyun 	mutex_init(&dhdp->wl_softap_lock);
2162*4882a593Smuzhiyun #endif /* OEM_ANDROID */
2163*4882a593Smuzhiyun }
2164*4882a593Smuzhiyun 
2165*4882a593Smuzhiyun inline static void MUTEX_LOCK_SOFTAP_SET(dhd_pub_t * dhdp)
2166*4882a593Smuzhiyun {
2167*4882a593Smuzhiyun #if defined(OEM_ANDROID)
2168*4882a593Smuzhiyun 	mutex_lock(&dhdp->wl_softap_lock);
2169*4882a593Smuzhiyun #endif /* OEM_ANDROID */
2170*4882a593Smuzhiyun }
2171*4882a593Smuzhiyun 
2172*4882a593Smuzhiyun inline static void MUTEX_UNLOCK_SOFTAP_SET(dhd_pub_t * dhdp)
2173*4882a593Smuzhiyun {
2174*4882a593Smuzhiyun #if defined(OEM_ANDROID)
2175*4882a593Smuzhiyun 	mutex_unlock(&dhdp->wl_softap_lock);
2176*4882a593Smuzhiyun #endif /* OEM_ANDROID */
2177*4882a593Smuzhiyun }
2178*4882a593Smuzhiyun 
2179*4882a593Smuzhiyun #ifdef DHD_DEBUG_WAKE_LOCK
2180*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK(pub) \
2181*4882a593Smuzhiyun 	do { \
2182*4882a593Smuzhiyun 		printf("call wake_lock: %s %d\n", \
2183*4882a593Smuzhiyun 			__FUNCTION__, __LINE__); \
2184*4882a593Smuzhiyun 		dhd_os_wake_lock(pub); \
2185*4882a593Smuzhiyun 	} while (0)
2186*4882a593Smuzhiyun #define DHD_OS_WAKE_UNLOCK(pub) \
2187*4882a593Smuzhiyun 	do { \
2188*4882a593Smuzhiyun 		printf("call wake_unlock: %s %d\n", \
2189*4882a593Smuzhiyun 			__FUNCTION__, __LINE__); \
2190*4882a593Smuzhiyun 		dhd_os_wake_unlock(pub); \
2191*4882a593Smuzhiyun 	} while (0)
2192*4882a593Smuzhiyun #define DHD_EVENT_WAKE_LOCK(pub) \
2193*4882a593Smuzhiyun 	do { \
2194*4882a593Smuzhiyun 		printf("call event wake_lock: %s %d\n", \
2195*4882a593Smuzhiyun 			__FUNCTION__, __LINE__); \
2196*4882a593Smuzhiyun 		dhd_event_wake_lock(pub); \
2197*4882a593Smuzhiyun 	} while (0)
2198*4882a593Smuzhiyun #define DHD_EVENT_WAKE_UNLOCK(pub) \
2199*4882a593Smuzhiyun 	do { \
2200*4882a593Smuzhiyun 		printf("call event wake_unlock: %s %d\n", \
2201*4882a593Smuzhiyun 			__FUNCTION__, __LINE__); \
2202*4882a593Smuzhiyun 		dhd_event_wake_unlock(pub); \
2203*4882a593Smuzhiyun 	} while (0)
2204*4882a593Smuzhiyun #define DHD_PM_WAKE_LOCK_TIMEOUT(pub, val) \
2205*4882a593Smuzhiyun 	do { \
2206*4882a593Smuzhiyun 		printf("call pm_wake_timeout enable\n"); \
2207*4882a593Smuzhiyun 		dhd_pm_wake_lock_timeout(pub, val); \
2208*4882a593Smuzhiyun 	} while (0)
2209*4882a593Smuzhiyun #define DHD_PM_WAKE_UNLOCK(pub) \
2210*4882a593Smuzhiyun 	do { \
2211*4882a593Smuzhiyun 		printf("call pm_wake unlock\n"); \
2212*4882a593Smuzhiyun 		dhd_pm_wake_unlock(pub); \
2213*4882a593Smuzhiyun 	} while (0)
2214*4882a593Smuzhiyun #define DHD_TXFL_WAKE_LOCK_TIMEOUT(pub, val) \
2215*4882a593Smuzhiyun 	do { \
2216*4882a593Smuzhiyun 		printf("call txfl_wake_timeout enable\n"); \
2217*4882a593Smuzhiyun 		dhd_txfl_wake_lock_timeout(pub, val); \
2218*4882a593Smuzhiyun 	} while (0)
2219*4882a593Smuzhiyun #define DHD_TXFL_WAKE_UNLOCK(pub) \
2220*4882a593Smuzhiyun 	do { \
2221*4882a593Smuzhiyun 		printf("call txfl_wake unlock\n"); \
2222*4882a593Smuzhiyun 		dhd_txfl_wake_unlock(pub); \
2223*4882a593Smuzhiyun 	} while (0)
2224*4882a593Smuzhiyun #define DHD_NAN_WAKE_LOCK_TIMEOUT(pub, val) \
2225*4882a593Smuzhiyun 	do { \
2226*4882a593Smuzhiyun 		printf("call nan_wake_timeout enable\n"); \
2227*4882a593Smuzhiyun 		dhd_nan_wake_lock_timeout(pub, val); \
2228*4882a593Smuzhiyun 	} while (0)
2229*4882a593Smuzhiyun #define DHD_NAN_WAKE_UNLOCK(pub) \
2230*4882a593Smuzhiyun 	do { \
2231*4882a593Smuzhiyun 		printf("call nan_wake unlock\n"); \
2232*4882a593Smuzhiyun 		dhd_nan_wake_unlock(pub); \
2233*4882a593Smuzhiyun 	} while (0)
2234*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_TIMEOUT(pub) \
2235*4882a593Smuzhiyun 	do { \
2236*4882a593Smuzhiyun 		printf("call wake_lock_timeout: %s %d\n", \
2237*4882a593Smuzhiyun 			__FUNCTION__, __LINE__); \
2238*4882a593Smuzhiyun 		dhd_os_wake_lock_timeout(pub); \
2239*4882a593Smuzhiyun 	} while (0)
2240*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) \
2241*4882a593Smuzhiyun 	do { \
2242*4882a593Smuzhiyun 		printf("call dhd_wake_lock_rx_timeout_enable[%d]: %s %d\n", \
2243*4882a593Smuzhiyun 			val, __FUNCTION__, __LINE__); \
2244*4882a593Smuzhiyun 		dhd_os_wake_lock_rx_timeout_enable(pub, val); \
2245*4882a593Smuzhiyun 	} while (0)
2246*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) \
2247*4882a593Smuzhiyun 	do { \
2248*4882a593Smuzhiyun 		printf("call dhd_wake_lock_ctrl_timeout_enable[%d]: %s %d\n", \
2249*4882a593Smuzhiyun 			val, __FUNCTION__, __LINE__); \
2250*4882a593Smuzhiyun 		dhd_os_wake_lock_ctrl_timeout_enable(pub, val); \
2251*4882a593Smuzhiyun 	} while (0)
2252*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub) \
2253*4882a593Smuzhiyun 	do { \
2254*4882a593Smuzhiyun 		printf("call dhd_wake_lock_ctrl_timeout_cancel: %s %d\n", \
2255*4882a593Smuzhiyun 			__FUNCTION__, __LINE__); \
2256*4882a593Smuzhiyun 		dhd_os_wake_lock_ctrl_timeout_cancel(pub); \
2257*4882a593Smuzhiyun 	} while (0)
2258*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_WAIVE(pub) \
2259*4882a593Smuzhiyun 	do { \
2260*4882a593Smuzhiyun 		printf("call dhd_wake_lock_waive: %s %d\n", \
2261*4882a593Smuzhiyun 			__FUNCTION__, __LINE__); \
2262*4882a593Smuzhiyun 		dhd_os_wake_lock_waive(pub); \
2263*4882a593Smuzhiyun 	} while (0)
2264*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_RESTORE(pub) \
2265*4882a593Smuzhiyun 	do { \
2266*4882a593Smuzhiyun 		printf("call dhd_wake_lock_restore: %s %d\n", \
2267*4882a593Smuzhiyun 			__FUNCTION__, __LINE__); \
2268*4882a593Smuzhiyun 		dhd_os_wake_lock_restore(pub); \
2269*4882a593Smuzhiyun 	} while (0)
2270*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_INIT(dhd) \
2271*4882a593Smuzhiyun 	do { \
2272*4882a593Smuzhiyun 		printf("call dhd_wake_lock_init: %s %d\n", \
2273*4882a593Smuzhiyun 			__FUNCTION__, __LINE__); \
2274*4882a593Smuzhiyun 		dhd_os_wake_lock_init(dhd); \
2275*4882a593Smuzhiyun 	} while (0)
2276*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_DESTROY(dhd) \
2277*4882a593Smuzhiyun 	do { \
2278*4882a593Smuzhiyun 		printf("call dhd_wake_lock_destroy: %s %d\n", \
2279*4882a593Smuzhiyun 			__FUNCTION__, __LINE__); \
2280*4882a593Smuzhiyun 		dhd_os_wake_lock_destroy(dhd); \
2281*4882a593Smuzhiyun 	} while (0)
2282*4882a593Smuzhiyun #else
2283*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK(pub)			dhd_os_wake_lock(pub)
2284*4882a593Smuzhiyun #define DHD_OS_WAKE_UNLOCK(pub)		dhd_os_wake_unlock(pub)
2285*4882a593Smuzhiyun #define DHD_EVENT_WAKE_LOCK(pub)			dhd_event_wake_lock(pub)
2286*4882a593Smuzhiyun #define DHD_EVENT_WAKE_UNLOCK(pub)		dhd_event_wake_unlock(pub)
2287*4882a593Smuzhiyun #define DHD_PM_WAKE_LOCK_TIMEOUT(pub, val)  dhd_pm_wake_lock_timeout(pub, val)
2288*4882a593Smuzhiyun #define DHD_PM_WAKE_UNLOCK(pub) 			dhd_pm_wake_unlock(pub)
2289*4882a593Smuzhiyun #define DHD_TXFL_WAKE_LOCK_TIMEOUT(pub, val)	dhd_txfl_wake_lock_timeout(pub, val)
2290*4882a593Smuzhiyun #define DHD_TXFL_WAKE_UNLOCK(pub) 			dhd_txfl_wake_unlock(pub)
2291*4882a593Smuzhiyun #define DHD_NAN_WAKE_LOCK_TIMEOUT(pub, val)	dhd_nan_wake_lock_timeout(pub, val)
2292*4882a593Smuzhiyun #define DHD_NAN_WAKE_UNLOCK(pub)		dhd_nan_wake_unlock(pub)
2293*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_TIMEOUT(pub)		dhd_os_wake_lock_timeout(pub)
2294*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) \
2295*4882a593Smuzhiyun 	dhd_os_wake_lock_rx_timeout_enable(pub, val)
2296*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) \
2297*4882a593Smuzhiyun 	dhd_os_wake_lock_ctrl_timeout_enable(pub, val)
2298*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub) \
2299*4882a593Smuzhiyun 	dhd_os_wake_lock_ctrl_timeout_cancel(pub)
2300*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_WAIVE(pub)			dhd_os_wake_lock_waive(pub)
2301*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_RESTORE(pub)		dhd_os_wake_lock_restore(pub)
2302*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_INIT(dhd)		dhd_os_wake_lock_init(dhd)
2303*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_DESTROY(dhd)		dhd_os_wake_lock_destroy(dhd)
2304*4882a593Smuzhiyun #endif /* DHD_DEBUG_WAKE_LOCK */
2305*4882a593Smuzhiyun 
2306*4882a593Smuzhiyun #define DHD_OS_WD_WAKE_LOCK(pub)		dhd_os_wd_wake_lock(pub)
2307*4882a593Smuzhiyun #define DHD_OS_WD_WAKE_UNLOCK(pub)		dhd_os_wd_wake_unlock(pub)
2308*4882a593Smuzhiyun 
2309*4882a593Smuzhiyun #ifdef DHD_USE_SCAN_WAKELOCK
2310*4882a593Smuzhiyun #ifdef DHD_DEBUG_SCAN_WAKELOCK
2311*4882a593Smuzhiyun #define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val) \
2312*4882a593Smuzhiyun 	do { \
2313*4882a593Smuzhiyun 		printf("call wake_lock_scan: %s %d\n", \
2314*4882a593Smuzhiyun 			__FUNCTION__, __LINE__); \
2315*4882a593Smuzhiyun 		dhd_os_scan_wake_lock_timeout(pub, val); \
2316*4882a593Smuzhiyun 	} while (0)
2317*4882a593Smuzhiyun #define DHD_OS_SCAN_WAKE_UNLOCK(pub) \
2318*4882a593Smuzhiyun 	do { \
2319*4882a593Smuzhiyun 		printf("call wake_unlock_scan: %s %d\n", \
2320*4882a593Smuzhiyun 			__FUNCTION__, __LINE__); \
2321*4882a593Smuzhiyun 		dhd_os_scan_wake_unlock(pub); \
2322*4882a593Smuzhiyun 	} while (0)
2323*4882a593Smuzhiyun #else
2324*4882a593Smuzhiyun #define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val)		dhd_os_scan_wake_lock_timeout(pub, val)
2325*4882a593Smuzhiyun #define DHD_OS_SCAN_WAKE_UNLOCK(pub)			dhd_os_scan_wake_unlock(pub)
2326*4882a593Smuzhiyun #endif /* DHD_DEBUG_SCAN_WAKELOCK */
2327*4882a593Smuzhiyun #else
2328*4882a593Smuzhiyun #define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val)
2329*4882a593Smuzhiyun #define DHD_OS_SCAN_WAKE_UNLOCK(pub)
2330*4882a593Smuzhiyun #endif /* DHD_USE_SCAN_WAKELOCK */
2331*4882a593Smuzhiyun 
2332*4882a593Smuzhiyun #else
2333*4882a593Smuzhiyun 
2334*4882a593Smuzhiyun /* Wake locks are used in Android only (until the Linux community accepts them) */
2335*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK(pub)
2336*4882a593Smuzhiyun #define DHD_OS_WAKE_UNLOCK(pub)
2337*4882a593Smuzhiyun #define DHD_EVENT_WAKE_LOCK(pub)
2338*4882a593Smuzhiyun #define DHD_EVENT_WAKE_UNLOCK(pub)
2339*4882a593Smuzhiyun #define DHD_PM_WAKE_LOCK_TIMEOUT(pub, val)
2340*4882a593Smuzhiyun #define DHD_PM_WAKE_UNLOCK(pub)
2341*4882a593Smuzhiyun #define DHD_TXFL_WAKE_LOCK_TIMEOUT(pub, val)
2342*4882a593Smuzhiyun #define DHD_TXFL_WAKE_UNLOCK(pub)
2343*4882a593Smuzhiyun #define DHD_NAN_WAKE_LOCK_TIMEOUT(pub, val)
2344*4882a593Smuzhiyun #define DHD_NAN_WAKE_UNLOCK(pub)
2345*4882a593Smuzhiyun #define DHD_OS_WD_WAKE_LOCK(pub)
2346*4882a593Smuzhiyun #define DHD_OS_WD_WAKE_UNLOCK(pub)
2347*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_TIMEOUT(pub)
2348*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val)	UNUSED_PARAMETER(val)
2349*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val)	UNUSED_PARAMETER(val)
2350*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub)
2351*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_WAIVE(pub)
2352*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_RESTORE(pub)
2353*4882a593Smuzhiyun #define DHD_OS_SCAN_WAKE_LOCK_TIMEOUT(pub, val)
2354*4882a593Smuzhiyun #define DHD_OS_SCAN_WAKE_UNLOCK(pub)
2355*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_INIT(dhd)
2356*4882a593Smuzhiyun #define DHD_OS_WAKE_LOCK_DESTROY(dhd)
2357*4882a593Smuzhiyun 
2358*4882a593Smuzhiyun #endif /* defined(linux) && defined(OEM_ANDROID) */
2359*4882a593Smuzhiyun 
2360*4882a593Smuzhiyun #ifdef BCMPCIE_OOB_HOST_WAKE
2361*4882a593Smuzhiyun #define OOB_WAKE_LOCK_TIMEOUT 500
2362*4882a593Smuzhiyun extern void dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val);
2363*4882a593Smuzhiyun extern void dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub);
2364*4882a593Smuzhiyun 
2365*4882a593Smuzhiyun #define DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(pub, val)	dhd_os_oob_irq_wake_lock_timeout(pub, val)
2366*4882a593Smuzhiyun #define DHD_OS_OOB_IRQ_WAKE_UNLOCK(pub)			dhd_os_oob_irq_wake_unlock(pub)
2367*4882a593Smuzhiyun #endif /* BCMPCIE_OOB_HOST_WAKE */
2368*4882a593Smuzhiyun 
2369*4882a593Smuzhiyun #define DHD_PACKET_TIMEOUT_MS	500
2370*4882a593Smuzhiyun #define DHD_EVENT_TIMEOUT_MS	1500
2371*4882a593Smuzhiyun #define SCAN_WAKE_LOCK_TIMEOUT	10000
2372*4882a593Smuzhiyun #define MAX_TX_TIMEOUT			500
2373*4882a593Smuzhiyun 
2374*4882a593Smuzhiyun /* Enum for IOCTL received status */
2375*4882a593Smuzhiyun typedef enum dhd_ioctl_recieved_status
2376*4882a593Smuzhiyun {
2377*4882a593Smuzhiyun 	IOCTL_WAIT = 0,
2378*4882a593Smuzhiyun 	IOCTL_RETURN_ON_SUCCESS,
2379*4882a593Smuzhiyun 	IOCTL_RETURN_ON_TRAP,
2380*4882a593Smuzhiyun 	IOCTL_RETURN_ON_BUS_STOP,
2381*4882a593Smuzhiyun 	IOCTL_RETURN_ON_ERROR
2382*4882a593Smuzhiyun } dhd_ioctl_recieved_status_t;
2383*4882a593Smuzhiyun 
2384*4882a593Smuzhiyun /* Interface operations (register, remove) should be atomic; use this lock to prevent races
2385*4882a593Smuzhiyun  * between Wi-Fi on/off and the interface operation functions.
2386*4882a593Smuzhiyun  */
2387*4882a593Smuzhiyun #if defined(LINUX)
2388*4882a593Smuzhiyun void dhd_net_if_lock(struct net_device *dev);
2389*4882a593Smuzhiyun void dhd_net_if_unlock(struct net_device *dev);
2390*4882a593Smuzhiyun #endif /* LINUX */
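/*
 * Usage sketch (an assumption for illustration; the actual call sites are in
 * the OS layer): any path that adds or removes a virtual interface is expected
 * to hold this lock so it cannot race with a concurrent Wi-Fi on/off:
 *
 *	dhd_net_if_lock(net);
 *	err = dhd_register_if(dhdp, ifidx, TRUE);   // or dhd_remove_if()
 *	dhd_net_if_unlock(net);
 */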
2391*4882a593Smuzhiyun 
2392*4882a593Smuzhiyun #if defined(LINUX) || defined(linux)
2393*4882a593Smuzhiyun #if defined(MULTIPLE_SUPPLICANT)
2394*4882a593Smuzhiyun extern void wl_android_post_init(void); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
2395*4882a593Smuzhiyun #endif /* MULTIPLE_SUPPLICANT */
2396*4882a593Smuzhiyun 
2397*4882a593Smuzhiyun #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && defined(MULTIPLE_SUPPLICANT)
2398*4882a593Smuzhiyun extern struct mutex _dhd_mutex_lock_;
2399*4882a593Smuzhiyun #define DHD_MUTEX_IS_LOCK_RETURN() \
2400*4882a593Smuzhiyun 	if (mutex_is_locked(&_dhd_mutex_lock_) != 0) { \
2401*4882a593Smuzhiyun 		printf("%s : probe is already running! return.\n", __FUNCTION__); \
2402*4882a593Smuzhiyun 		return -EBUSY; \
2403*4882a593Smuzhiyun 	}
2404*4882a593Smuzhiyun #define DHD_MUTEX_LOCK() \
2405*4882a593Smuzhiyun 	do { \
2406*4882a593Smuzhiyun 		if (mutex_is_locked(&_dhd_mutex_lock_) == 0) { \
2407*4882a593Smuzhiyun 			printf("%s : no mutex held\n", __FUNCTION__); \
2408*4882a593Smuzhiyun 		} else { \
2409*4882a593Smuzhiyun 			printf("%s : mutex is locked!. wait for unlocking\n", __FUNCTION__); \
2410*4882a593Smuzhiyun 		} \
2411*4882a593Smuzhiyun 		mutex_lock(&_dhd_mutex_lock_); \
2412*4882a593Smuzhiyun 		printf("%s : set mutex lock\n", __FUNCTION__); \
2413*4882a593Smuzhiyun 	} while (0)
2414*4882a593Smuzhiyun #define DHD_MUTEX_UNLOCK() \
2415*4882a593Smuzhiyun 	do { \
2416*4882a593Smuzhiyun 		printf("%s : mutex is released.\n", __FUNCTION__); \
2417*4882a593Smuzhiyun 		mutex_unlock(&_dhd_mutex_lock_); \
2418*4882a593Smuzhiyun 	} while (0)
2419*4882a593Smuzhiyun #else
2420*4882a593Smuzhiyun #define DHD_MUTEX_IS_LOCK_RETURN()	do {} while (0)
2421*4882a593Smuzhiyun #define DHD_MUTEX_LOCK()	do {} while (0)
2422*4882a593Smuzhiyun #define DHD_MUTEX_UNLOCK()	do {} while (0)
2423*4882a593Smuzhiyun #endif
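/*
 * Probe serialization sketch (hedged example; the exact call sites live in the
 * bus probe and open paths): with MULTIPLE_SUPPLICANT the mutex above keeps
 * dhd_open() and the SDIO probe from running concurrently:
 *
 *	DHD_MUTEX_IS_LOCK_RETURN();   // bail out with -EBUSY if probe already runs
 *	DHD_MUTEX_LOCK();
 *	... probe / open work ...
 *	DHD_MUTEX_UNLOCK();
 */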
2424*4882a593Smuzhiyun #endif /* defined (LINUX) || defined(linux) */
2425*4882a593Smuzhiyun 
2426*4882a593Smuzhiyun typedef enum dhd_attach_states
2427*4882a593Smuzhiyun {
2428*4882a593Smuzhiyun 	DHD_ATTACH_STATE_INIT = 0x0,
2429*4882a593Smuzhiyun 	DHD_ATTACH_STATE_NET_ALLOC = 0x1,
2430*4882a593Smuzhiyun 	DHD_ATTACH_STATE_DHD_ALLOC = 0x2,
2431*4882a593Smuzhiyun 	DHD_ATTACH_STATE_ADD_IF = 0x4,
2432*4882a593Smuzhiyun 	DHD_ATTACH_STATE_PROT_ATTACH = 0x8,
2433*4882a593Smuzhiyun 	DHD_ATTACH_STATE_WL_ATTACH = 0x10,
2434*4882a593Smuzhiyun 	DHD_ATTACH_STATE_THREADS_CREATED = 0x20,
2435*4882a593Smuzhiyun 	DHD_ATTACH_STATE_WAKELOCKS_INIT = 0x40,
2436*4882a593Smuzhiyun 	DHD_ATTACH_STATE_CFG80211 = 0x80,
2437*4882a593Smuzhiyun 	DHD_ATTACH_STATE_EARLYSUSPEND_DONE = 0x100,
2438*4882a593Smuzhiyun 	DHD_ATTACH_TIMESYNC_ATTACH_DONE = 0x200,
2439*4882a593Smuzhiyun 	DHD_ATTACH_LOGTRACE_INIT = 0x400,
2440*4882a593Smuzhiyun 	DHD_ATTACH_STATE_LB_ATTACH_DONE = 0x800,
2441*4882a593Smuzhiyun 	DHD_ATTACH_STATE_DONE = 0x1000
2442*4882a593Smuzhiyun } dhd_attach_states_t;
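/*
 * The attach states form a bitmask recording how far dhd_attach() progressed,
 * so detach/cleanup can undo only the completed steps. A hedged sketch of how
 * a cleanup path might consume it (field and helper names are hypothetical):
 *
 *	uint32 state = dhd->dhd_state;                // saved attach-state bitmask
 *	if (state & DHD_ATTACH_STATE_WAKELOCKS_INIT)
 *		DHD_OS_WAKE_LOCK_DESTROY(dhd);
 *	if (state & DHD_ATTACH_STATE_PROT_ATTACH)
 *		dhd_prot_detach(dhdp);                // hypothetical protocol detach
 */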
2443*4882a593Smuzhiyun 
2444*4882a593Smuzhiyun /* Value -1 means we are unsuccessful in creating the kthread. */
2445*4882a593Smuzhiyun #define DHD_PID_KT_INVALID		-1
2446*4882a593Smuzhiyun /* Value -2 means we are unsuccessful in creating both the kthread and the tasklet. */
2447*4882a593Smuzhiyun #define DHD_PID_KT_TL_INVALID		-2
2448*4882a593Smuzhiyun 
2449*4882a593Smuzhiyun /* default reporting period */
2450*4882a593Smuzhiyun #define ECOUNTERS_DEFAULT_PERIOD	0
2451*4882a593Smuzhiyun 
2452*4882a593Smuzhiyun /* default number of reports. '0' indicates forever */
2453*4882a593Smuzhiyun #define ECOUNTERS_NUM_REPORTS		0
2454*4882a593Smuzhiyun 
2455*4882a593Smuzhiyun typedef struct ecounters_cfg {
2456*4882a593Smuzhiyun 	uint16 type;
2457*4882a593Smuzhiyun 	uint16 if_slice_idx;
2458*4882a593Smuzhiyun 	uint16 stats_rep;
2459*4882a593Smuzhiyun } ecounters_cfg_t;
2460*4882a593Smuzhiyun 
2461*4882a593Smuzhiyun typedef struct event_ecounters_cfg {
2462*4882a593Smuzhiyun 	uint16 event_id;
2463*4882a593Smuzhiyun 	uint16 type;
2464*4882a593Smuzhiyun 	uint16 if_slice_idx;
2465*4882a593Smuzhiyun 	uint16 stats_rep;
2466*4882a593Smuzhiyun } event_ecounters_cfg_t;
2467*4882a593Smuzhiyun 
2468*4882a593Smuzhiyun typedef struct ecountersv2_xtlv_list_elt {
2469*4882a593Smuzhiyun 	/* Not quite the exact bcm_xtlv_t type as data could be pointing to other pieces in
2470*4882a593Smuzhiyun 	 * memory at the time of parsing arguments.
2471*4882a593Smuzhiyun 	 */
2472*4882a593Smuzhiyun 	uint16 id;
2473*4882a593Smuzhiyun 	uint16 len;
2474*4882a593Smuzhiyun 	uint8 *data;
2475*4882a593Smuzhiyun 	struct ecountersv2_xtlv_list_elt *next;
2476*4882a593Smuzhiyun } ecountersv2_xtlv_list_elt_t;
2477*4882a593Smuzhiyun 
2478*4882a593Smuzhiyun typedef struct ecountersv2_processed_xtlv_list_elt {
2479*4882a593Smuzhiyun 	uint8 *data;
2480*4882a593Smuzhiyun 	struct ecountersv2_processed_xtlv_list_elt *next;
2481*4882a593Smuzhiyun } ecountersv2_processed_xtlv_list_elt;
2482*4882a593Smuzhiyun 
2483*4882a593Smuzhiyun /*
2484*4882a593Smuzhiyun  * Exported from dhd OS modules (dhd_linux/dhd_ndis)
2485*4882a593Smuzhiyun  */
2486*4882a593Smuzhiyun 
2487*4882a593Smuzhiyun /* Indication from bus module regarding presence/insertion of dongle.
2488*4882a593Smuzhiyun  * Return dhd_pub_t pointer, used as handle to OS module in later calls.
2489*4882a593Smuzhiyun  * Returned structure should have bus and prot pointers filled in.
2490*4882a593Smuzhiyun  * bus_hdrlen specifies required headroom for bus module header.
2491*4882a593Smuzhiyun  */
2492*4882a593Smuzhiyun extern dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen
2493*4882a593Smuzhiyun #ifdef BCMDBUS
2494*4882a593Smuzhiyun 	, void *adapter
2495*4882a593Smuzhiyun #endif
2496*4882a593Smuzhiyun );
2497*4882a593Smuzhiyun extern int dhd_attach_net(dhd_pub_t *dhdp, bool need_rtnl_lock);
2498*4882a593Smuzhiyun #if defined(WLP2P) && defined(WL_CFG80211)
2499*4882a593Smuzhiyun /* To allow attach/detach calls corresponding to the p2p0 interface */
2500*4882a593Smuzhiyun extern int dhd_attach_p2p(dhd_pub_t *);
2501*4882a593Smuzhiyun extern int dhd_detach_p2p(dhd_pub_t *);
2502*4882a593Smuzhiyun #endif /* WLP2P && WL_CFG80211 */
2503*4882a593Smuzhiyun extern int dhd_register_if(dhd_pub_t *dhdp, int idx, bool need_rtnl_lock);
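/*
 * Rough bring-up sequence as suggested by the comment above (a sketch under
 * the assumption of a non-BCMDBUS build; the real bus probe code adds error
 * handling, locking, and firmware download):
 *
 *	dhd_pub_t *dhdp = dhd_attach(osh, bus, bus_hdrlen);
 *	if (dhdp == NULL)
 *		goto fail;
 *	if (dhd_attach_net(dhdp, TRUE) != 0)        // allocate the primary net_device
 *		goto fail;
 *	if (dhd_register_if(dhdp, 0, TRUE) != 0)    // register ifidx 0 (e.g. "wlan0")
 *		goto fail;
 */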
2504*4882a593Smuzhiyun 
2505*4882a593Smuzhiyun /* Indication from bus module regarding removal/absence of dongle */
2506*4882a593Smuzhiyun extern void dhd_detach(dhd_pub_t *dhdp);
2507*4882a593Smuzhiyun extern void dhd_free(dhd_pub_t *dhdp);
2508*4882a593Smuzhiyun extern void dhd_clear(dhd_pub_t *dhdp);
2509*4882a593Smuzhiyun 
2510*4882a593Smuzhiyun /* Indication from bus module to change flow-control state */
2511*4882a593Smuzhiyun extern void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool on);
2512*4882a593Smuzhiyun 
2513*4882a593Smuzhiyun #ifdef BCMDONGLEHOST
2514*4882a593Smuzhiyun /* Store the status of a connection attempt for later retrieval by an iovar */
2515*4882a593Smuzhiyun extern void dhd_store_conn_status(uint32 event, uint32 status, uint32 reason);
2516*4882a593Smuzhiyun #endif /* BCMDONGLEHOST */
2517*4882a593Smuzhiyun 
2518*4882a593Smuzhiyun extern bool dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec);
2519*4882a593Smuzhiyun 
2520*4882a593Smuzhiyun extern void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *rxp, int numpkt, uint8 chan);
2521*4882a593Smuzhiyun 
2522*4882a593Smuzhiyun /* Return pointer to interface name */
2523*4882a593Smuzhiyun extern char *dhd_ifname(dhd_pub_t *dhdp, int idx);
2524*4882a593Smuzhiyun 
2525*4882a593Smuzhiyun #ifdef DHD_UCODE_DOWNLOAD
2526*4882a593Smuzhiyun /* Returns the ucode path */
2527*4882a593Smuzhiyun extern char *dhd_get_ucode_path(dhd_pub_t *dhdp);
2528*4882a593Smuzhiyun #endif /* DHD_UCODE_DOWNLOAD */
2529*4882a593Smuzhiyun 
2530*4882a593Smuzhiyun /* Request scheduling of the bus dpc */
2531*4882a593Smuzhiyun extern void dhd_sched_dpc(dhd_pub_t *dhdp);
2532*4882a593Smuzhiyun 
2533*4882a593Smuzhiyun /* Notify tx completion */
2534*4882a593Smuzhiyun extern void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success);
2535*4882a593Smuzhiyun #ifdef DHD_4WAYM4_FAIL_DISCONNECT
2536*4882a593Smuzhiyun extern void dhd_eap_txcomplete(dhd_pub_t *dhdp, void *txp, bool success, int ifidx);
2537*4882a593Smuzhiyun extern void dhd_cleanup_m4_state_work(dhd_pub_t *dhdp, int ifidx);
2538*4882a593Smuzhiyun #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
2539*4882a593Smuzhiyun 
2540*4882a593Smuzhiyun #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2541*4882a593Smuzhiyun extern void dhd_bus_wakeup_work(dhd_pub_t *dhdp);
2542*4882a593Smuzhiyun #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2543*4882a593Smuzhiyun 
2544*4882a593Smuzhiyun #define WIFI_FEATURE_INFRA              0x0001      /* Basic infrastructure mode        */
2545*4882a593Smuzhiyun #define WIFI_FEATURE_INFRA_5G           0x0002      /* Support for 5 GHz Band           */
2546*4882a593Smuzhiyun #define WIFI_FEATURE_HOTSPOT            0x0004      /* Support for GAS/ANQP             */
2547*4882a593Smuzhiyun #define WIFI_FEATURE_P2P                0x0008      /* Wifi-Direct                      */
2548*4882a593Smuzhiyun #define WIFI_FEATURE_SOFT_AP            0x0010      /* Soft AP                          */
2549*4882a593Smuzhiyun #define WIFI_FEATURE_GSCAN              0x0020      /* Google-Scan APIs                 */
2550*4882a593Smuzhiyun #define WIFI_FEATURE_NAN                0x0040      /* Neighbor Awareness Networking    */
2551*4882a593Smuzhiyun #define WIFI_FEATURE_D2D_RTT            0x0080      /* Device-to-device RTT             */
2552*4882a593Smuzhiyun #define WIFI_FEATURE_D2AP_RTT           0x0100      /* Device-to-AP RTT                 */
2553*4882a593Smuzhiyun #define WIFI_FEATURE_BATCH_SCAN         0x0200      /* Batched Scan (legacy)            */
2554*4882a593Smuzhiyun #define WIFI_FEATURE_PNO                0x0400      /* Preferred network offload        */
2555*4882a593Smuzhiyun #define WIFI_FEATURE_ADDITIONAL_STA     0x0800      /* Support for two STAs             */
2556*4882a593Smuzhiyun #define WIFI_FEATURE_TDLS               0x1000      /* Tunnel directed link setup       */
2557*4882a593Smuzhiyun #define WIFI_FEATURE_TDLS_OFFCHANNEL    0x2000      /* Support for TDLS off channel     */
2558*4882a593Smuzhiyun #define WIFI_FEATURE_EPR                0x4000      /* Enhanced power reporting         */
2559*4882a593Smuzhiyun #define WIFI_FEATURE_AP_STA             0x8000      /* Support for AP STA Concurrency   */
2560*4882a593Smuzhiyun #define WIFI_FEATURE_LINKSTAT           0x10000     /* Support for Linkstats            */
2561*4882a593Smuzhiyun #define WIFI_FEATURE_LOGGER             0x20000     /* WiFi Logger			*/
2562*4882a593Smuzhiyun #define WIFI_FEATURE_HAL_EPNO           0x40000	    /* WiFi PNO enhanced                */
2563*4882a593Smuzhiyun #define WIFI_FEATURE_RSSI_MONITOR       0x80000     /* RSSI Monitor                     */
2564*4882a593Smuzhiyun #define WIFI_FEATURE_MKEEP_ALIVE        0x100000    /* WiFi mkeep_alive			*/
2565*4882a593Smuzhiyun #define WIFI_FEATURE_CONFIG_NDO         0x200000    /* ND offload configure             */
2566*4882a593Smuzhiyun #define WIFI_FEATURE_TX_TRANSMIT_POWER  0x400000    /* Capture Tx transmit power levels */
2567*4882a593Smuzhiyun #define WIFI_FEATURE_CONTROL_ROAMING    0x800000    /* Enable/Disable firmware roaming  */
2568*4882a593Smuzhiyun #define WIFI_FEATURE_FILTER_IE          0x1000000   /* Probe req ie filter              */
2569*4882a593Smuzhiyun #define WIFI_FEATURE_SCAN_RAND          0x2000000   /* MAC & Prb SN randomization       */
2570*4882a593Smuzhiyun #define WIFI_FEATURE_SET_TX_POWER_LIMIT 0x4000000   /* Support Tx Power Limit setting   */
2571*4882a593Smuzhiyun #define WIFI_FEATURE_USE_BODY_HEAD_SAR  0x8000000   /* Support Body/Head Proximity SAR  */
2572*4882a593Smuzhiyun #define WIFI_FEATURE_SET_LATENCY_MODE   0x40000000  /* Support Latency mode setting     */
2573*4882a593Smuzhiyun #define WIFI_FEATURE_P2P_RAND_MAC       0x80000000  /* Support P2P MAC randomization    */
2574*4882a593Smuzhiyun #define WIFI_FEATURE_INVALID            0xFFFFFFFF  /* Invalid Feature                  */
2575*4882a593Smuzhiyun 
2576*4882a593Smuzhiyun #define MAX_FEATURE_SET_CONCURRRENT_GROUPS  3
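/*
 * The WIFI_FEATURE_* values are single-bit flags, so a feature word returned
 * by dhd_dev_get_feature_set() (declared below) can be tested with simple
 * masking. Illustrative sketch only:
 *
 *	int features = dhd_dev_get_feature_set(dev);
 *	if (features & WIFI_FEATURE_P2P)
 *		printf("P2P supported\n");
 *	if ((features & WIFI_FEATURE_GSCAN) && (features & WIFI_FEATURE_PNO))
 *		printf("GSCAN and PNO supported\n");
 */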
2577*4882a593Smuzhiyun 
2578*4882a593Smuzhiyun #if defined(linux) || defined(LINUX) || defined(OEM_ANDROID)
2579*4882a593Smuzhiyun extern int dhd_dev_get_feature_set(struct net_device *dev);
2580*4882a593Smuzhiyun extern int dhd_dev_get_feature_set_matrix(struct net_device *dev, int num);
2581*4882a593Smuzhiyun extern int dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui);
2582*4882a593Smuzhiyun extern int dhd_update_rand_mac_addr(dhd_pub_t *dhd);
2583*4882a593Smuzhiyun #ifdef CUSTOM_FORCE_NODFS_FLAG
2584*4882a593Smuzhiyun extern int dhd_dev_set_nodfs(struct net_device *dev, uint nodfs);
2585*4882a593Smuzhiyun #endif /* CUSTOM_FORCE_NODFS_FLAG */
2586*4882a593Smuzhiyun #ifdef NDO_CONFIG_SUPPORT
2587*4882a593Smuzhiyun #ifndef NDO_MAX_HOST_IP_ENTRIES
2588*4882a593Smuzhiyun #define NDO_MAX_HOST_IP_ENTRIES	10
2589*4882a593Smuzhiyun #endif /* NDO_MAX_HOST_IP_ENTRIES */
2590*4882a593Smuzhiyun 
2591*4882a593Smuzhiyun extern int dhd_dev_ndo_cfg(struct net_device *dev, u8 enable);
2592*4882a593Smuzhiyun extern int dhd_dev_ndo_update_inet6addr(struct net_device * dev);
2593*4882a593Smuzhiyun #endif /* NDO_CONFIG_SUPPORT */
2594*4882a593Smuzhiyun #endif /* (linux) || (LINUX) || (OEM_ANDROID) */
2595*4882a593Smuzhiyun extern int dhd_set_rand_mac_oui(dhd_pub_t *dhd);
2596*4882a593Smuzhiyun #ifdef GSCAN_SUPPORT
2597*4882a593Smuzhiyun extern int dhd_dev_set_lazy_roam_cfg(struct net_device *dev,
2598*4882a593Smuzhiyun              wlc_roam_exp_params_t *roam_param);
2599*4882a593Smuzhiyun extern int dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable);
2600*4882a593Smuzhiyun extern int dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev,
2601*4882a593Smuzhiyun        wl_bssid_pref_cfg_t *bssid_pref, uint32 flush);
2602*4882a593Smuzhiyun #endif /* GSCAN_SUPPORT */
2603*4882a593Smuzhiyun #if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
2604*4882a593Smuzhiyun extern int dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist,
2605*4882a593Smuzhiyun     uint32 len, uint32 flush);
2606*4882a593Smuzhiyun extern int dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *whitelist,
2607*4882a593Smuzhiyun     uint32 len, uint32 flush);
2608*4882a593Smuzhiyun #endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */
2609*4882a593Smuzhiyun 
2610*4882a593Smuzhiyun /* OS independent layer functions */
2611*4882a593Smuzhiyun extern void dhd_os_dhdiovar_lock(dhd_pub_t *pub);
2612*4882a593Smuzhiyun extern void dhd_os_dhdiovar_unlock(dhd_pub_t *pub);
2613*4882a593Smuzhiyun void dhd_os_logdump_lock(dhd_pub_t *pub);
2614*4882a593Smuzhiyun void dhd_os_logdump_unlock(dhd_pub_t *pub);
2615*4882a593Smuzhiyun extern int dhd_os_proto_block(dhd_pub_t * pub);
2616*4882a593Smuzhiyun extern int dhd_os_proto_unblock(dhd_pub_t * pub);
2617*4882a593Smuzhiyun extern int dhd_os_ioctl_resp_wait(dhd_pub_t * pub, uint * condition);
2618*4882a593Smuzhiyun extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub);
2619*4882a593Smuzhiyun extern unsigned int dhd_os_get_ioctl_resp_timeout(void);
2620*4882a593Smuzhiyun extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec);
2621*4882a593Smuzhiyun extern void dhd_os_ioctl_resp_lock(dhd_pub_t * pub);
2622*4882a593Smuzhiyun extern void dhd_os_ioctl_resp_unlock(dhd_pub_t * pub);
2623*4882a593Smuzhiyun #ifdef PCIE_FULL_DONGLE
2624*4882a593Smuzhiyun extern void dhd_wakeup_ioctl_event(dhd_pub_t *pub, dhd_ioctl_recieved_status_t reason);
2625*4882a593Smuzhiyun #else
2626*4882a593Smuzhiyun static INLINE void dhd_wakeup_ioctl_event(dhd_pub_t *pub, dhd_ioctl_recieved_status_t reason)
2627*4882a593Smuzhiyun { printf("%s is NOT implemented for SDIO\n", __FUNCTION__); return; }
2628*4882a593Smuzhiyun #endif
2629*4882a593Smuzhiyun #ifdef SHOW_LOGTRACE
2630*4882a593Smuzhiyun /* The bound and delay values below were fine-tuned through experimentation and are the
2631*4882a593Smuzhiyun  * best-case values for handling bursts of console logs.
2632*4882a593Smuzhiyun  */
2633*4882a593Smuzhiyun #define DHD_EVENT_LOGTRACE_BOUND 10u
2634*4882a593Smuzhiyun #define DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS 10u
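/*
 * Hedged sketch of how these bounds are typically applied (the actual worker
 * lives in the OS layer): process at most DHD_EVENT_LOGTRACE_BOUND entries per
 * pass, then reschedule after DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS so a
 * flood of console logs cannot starve other work. Helper names below are
 * hypothetical:
 *
 *	uint processed = 0;
 *	while (!queue_empty(q) && processed++ < DHD_EVENT_LOGTRACE_BOUND)
 *		process_one_logtrace_entry(q);
 *	if (!queue_empty(q))
 *		reschedule_after_ms(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS);
 */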
2635*4882a593Smuzhiyun extern int dhd_os_read_file(void *file, char *buf, uint32 size);
2636*4882a593Smuzhiyun extern int dhd_os_seek_file(void *file, int64 offset);
2637*4882a593Smuzhiyun #endif /* SHOW_LOGTRACE */
2638*4882a593Smuzhiyun int dhd_os_write_file_posn(void *fp, unsigned long *posn,
2639*4882a593Smuzhiyun 		void *buf, unsigned long buflen);
2640*4882a593Smuzhiyun int dhd_msix_message_set(dhd_pub_t *dhdp, uint table_entry,
2641*4882a593Smuzhiyun     uint message_number, bool unmask);
2642*4882a593Smuzhiyun 
2643*4882a593Smuzhiyun #if defined(DHD_EFI)
2644*4882a593Smuzhiyun void dhd_os_set_intr_poll_period(struct dhd_bus *bus, unsigned int period_us);
2645*4882a593Smuzhiyun unsigned int dhd_os_get_intr_poll_period(void);
2646*4882a593Smuzhiyun int dhd_intr_poll(struct dhd_bus *bus, char *arg, int len, int set);
2647*4882a593Smuzhiyun #define INTR_POLL_PERIOD_CRITICAL 100 /* 100us -- in us */
2648*4882a593Smuzhiyun #define INTR_POLL_NPKTS_THRESH 1
2649*4882a593Smuzhiyun #define INTR_POLL_PKT_INTERVAL_THRESH 2000000 /* 2000ms -- in us */
2650*4882a593Smuzhiyun #if defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
2651*4882a593Smuzhiyun void dhd_intr_poll_pkt_thresholds(dhd_pub_t *dhd);
2652*4882a593Smuzhiyun #endif /* DHD_INTR_POLL_PERIOD_DYNAMIC */
2653*4882a593Smuzhiyun #endif /* DHD_EFI */
2654*4882a593Smuzhiyun 
2655*4882a593Smuzhiyun extern void
2656*4882a593Smuzhiyun dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr);
2657*4882a593Smuzhiyun extern void wl_dhdpcie_dump_regs(void * context);
2658*4882a593Smuzhiyun 
2659*4882a593Smuzhiyun #define DHD_OS_IOCTL_RESP_LOCK(x)
2660*4882a593Smuzhiyun #define DHD_OS_IOCTL_RESP_UNLOCK(x)
2661*4882a593Smuzhiyun 
2662*4882a593Smuzhiyun #if defined(NDIS)
2663*4882a593Smuzhiyun #define dhd_os_open_image(a) wl_os_open_image(a)
2664*4882a593Smuzhiyun #define dhd_os_close_image(a) wl_os_close_image(a)
2665*4882a593Smuzhiyun #define dhd_os_get_image_block(a, b, c) wl_os_get_image_block(a, b, c)
2666*4882a593Smuzhiyun #define dhd_os_get_image_size(a) wl_os_get_image_size(a)
2667*4882a593Smuzhiyun extern void dhd_os_wakeind(dhd_pub_t * pub, uint32 *val);
2668*4882a593Smuzhiyun extern void dhd_bus_check_died(void *bus);
2669*4882a593Smuzhiyun extern void pci_save_state(osl_t *osh, uint32 *buffer);
2670*4882a593Smuzhiyun extern void pci_restore_state(osl_t *osh, uint32 *buffer);
2671*4882a593Smuzhiyun #endif /* #if defined(NDIS) */
2672*4882a593Smuzhiyun 
2673*4882a593Smuzhiyun extern int dhd_os_get_image_block(char * buf, int len, void * image);
2674*4882a593Smuzhiyun extern int dhd_os_get_image_size(void * image);
2675*4882a593Smuzhiyun #if defined(BT_OVER_SDIO)
2676*4882a593Smuzhiyun extern int dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image);
2677*4882a593Smuzhiyun extern void dhdsdio_bus_usr_cnt_inc(dhd_pub_t *pub);
2678*4882a593Smuzhiyun extern void dhdsdio_bus_usr_cnt_dec(dhd_pub_t *pub);
2679*4882a593Smuzhiyun #endif /* (BT_OVER_SDIO) */
2680*4882a593Smuzhiyun extern void *dhd_os_open_image1(dhd_pub_t *pub, char *filename); /* rev1 function signature */
2681*4882a593Smuzhiyun extern void dhd_os_close_image1(dhd_pub_t *pub, void *image);
2682*4882a593Smuzhiyun extern void dhd_os_wd_timer(void *bus, uint wdtick);
2683*4882a593Smuzhiyun #ifdef DHD_PCIE_RUNTIMEPM
2684*4882a593Smuzhiyun extern void dhd_os_runtimepm_timer(void *bus, uint tick);
2685*4882a593Smuzhiyun #endif /* DHD_PCIE_RUNTIMEPM */
2686*4882a593Smuzhiyun extern void dhd_os_sdlock(dhd_pub_t * pub);
2687*4882a593Smuzhiyun extern void dhd_os_sdunlock(dhd_pub_t * pub);
2688*4882a593Smuzhiyun extern void dhd_os_sdlock_txq(dhd_pub_t * pub);
2689*4882a593Smuzhiyun extern void dhd_os_sdunlock_txq(dhd_pub_t * pub);
2690*4882a593Smuzhiyun extern unsigned long dhd_os_sdlock_txoff(dhd_pub_t * pub);
2691*4882a593Smuzhiyun extern void dhd_os_sdunlock_txoff(dhd_pub_t * pub, unsigned long flags);
2692*4882a593Smuzhiyun extern void dhd_os_sdlock_rxq(dhd_pub_t * pub);
2693*4882a593Smuzhiyun extern void dhd_os_sdunlock_rxq(dhd_pub_t * pub);
2694*4882a593Smuzhiyun extern void dhd_os_sdlock_sndup_rxq(dhd_pub_t * pub);
2695*4882a593Smuzhiyun extern void dhd_os_tracelog(const char *format, ...);
2696*4882a593Smuzhiyun #ifdef DHDTCPACK_SUPPRESS
2697*4882a593Smuzhiyun extern unsigned long dhd_os_tcpacklock(dhd_pub_t *pub);
2698*4882a593Smuzhiyun extern void dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags);
2699*4882a593Smuzhiyun #endif /* DHDTCPACK_SUPPRESS */
2700*4882a593Smuzhiyun 
2701*4882a593Smuzhiyun extern int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr);
2702*4882a593Smuzhiyun extern int dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff);
2703*4882a593Smuzhiyun extern int dhd_custom_get_mac_address(void *adapter, unsigned char *buf);
2704*4882a593Smuzhiyun #if defined(CUSTOM_COUNTRY_CODE)
2705*4882a593Smuzhiyun extern void get_customized_country_code(void *adapter, char *country_iso_code,
2706*4882a593Smuzhiyun 	wl_country_t *cspec, u32 flags);
2707*4882a593Smuzhiyun #else
2708*4882a593Smuzhiyun extern void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec);
2709*4882a593Smuzhiyun #endif /* CUSTOM_COUNTRY_CODE */
2710*4882a593Smuzhiyun extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub);
2711*4882a593Smuzhiyun extern void dhd_os_sdlock_eventq(dhd_pub_t * pub);
2712*4882a593Smuzhiyun extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub);
2713*4882a593Smuzhiyun extern bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret);
2714*4882a593Smuzhiyun extern int dhd_os_send_hang_message(dhd_pub_t *dhdp);
2715*4882a593Smuzhiyun extern void dhd_set_version_info(dhd_pub_t *pub, char *fw);
2716*4882a593Smuzhiyun extern bool dhd_os_check_if_up(dhd_pub_t *pub);
2717*4882a593Smuzhiyun extern int dhd_os_check_wakelock(dhd_pub_t *pub);
2718*4882a593Smuzhiyun extern int dhd_os_check_wakelock_all(dhd_pub_t *pub);
2719*4882a593Smuzhiyun extern int dhd_get_instance(dhd_pub_t *pub);
2720*4882a593Smuzhiyun #ifdef CUSTOM_SET_CPUCORE
2721*4882a593Smuzhiyun extern void dhd_set_cpucore(dhd_pub_t *dhd, int set);
2722*4882a593Smuzhiyun #endif /* CUSTOM_SET_CPUCORE */
2723*4882a593Smuzhiyun 
2724*4882a593Smuzhiyun #if defined(KEEP_ALIVE)
2725*4882a593Smuzhiyun extern int dhd_keep_alive_onoff(dhd_pub_t *dhd);
2726*4882a593Smuzhiyun #endif /* KEEP_ALIVE */
2727*4882a593Smuzhiyun 
2728*4882a593Smuzhiyun #if defined(DHD_FW_COREDUMP)
2729*4882a593Smuzhiyun void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size);
2730*4882a593Smuzhiyun #endif /* DHD_FW_COREDUMP */
2731*4882a593Smuzhiyun 
2732*4882a593Smuzhiyun #if defined(linux) || defined(LINUX)
2733*4882a593Smuzhiyun int dhd_os_get_img_fwreq(const struct firmware **fw, char *file_path);
2734*4882a593Smuzhiyun void dhd_os_close_img_fwreq(const struct firmware *fw);
2735*4882a593Smuzhiyun #if defined(DHD_SSSR_DUMP)
2736*4882a593Smuzhiyun void dhd_write_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode);
2737*4882a593Smuzhiyun #endif /* DHD_SSSR_DUMP */
2738*4882a593Smuzhiyun #ifdef DNGL_AXI_ERROR_LOGGING
2739*4882a593Smuzhiyun void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type);
2740*4882a593Smuzhiyun #endif /* DNGL_AXI_ERROR_LOGGING */
2741*4882a593Smuzhiyun #ifdef BCMPCIE
2742*4882a593Smuzhiyun void dhd_schedule_cto_recovery(dhd_pub_t *dhdp);
2743*4882a593Smuzhiyun #endif /* BCMPCIE */
2744*4882a593Smuzhiyun #else
2745*4882a593Smuzhiyun #if defined(DHD_SSSR_DUMP)
2746*4882a593Smuzhiyun static INLINE void dhd_write_sssr_dump(dhd_pub_t *dhd, uint32 dump_mode) { return; }
2747*4882a593Smuzhiyun #endif /* DHD_SSSR_DUMP */
2748*4882a593Smuzhiyun #ifdef DNGL_AXI_ERROR_LOGGING
2749*4882a593Smuzhiyun static INLINE void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type) { return; }
2750*4882a593Smuzhiyun #endif /* DNGL_AXI_ERROR_LOGGING */
2751*4882a593Smuzhiyun /* For non-Linux builds, map dhd_schedule_cto_recovery to dhdpcie_cto_recovery_handler */
2752*4882a593Smuzhiyun #ifdef BCMPCIE
2753*4882a593Smuzhiyun #define dhd_schedule_cto_recovery(dhdp) dhdpcie_cto_recovery_handler(dhdp)
2754*4882a593Smuzhiyun #endif /* BCMPCIE */
2755*4882a593Smuzhiyun #endif /* linux || LINUX */
2756*4882a593Smuzhiyun 
2757*4882a593Smuzhiyun #ifdef EWP_EDL
2758*4882a593Smuzhiyun #define EDL_SCHEDULE_DELAY 500 /* 500ms */
2759*4882a593Smuzhiyun #if defined(linux) || defined(LINUX)
2760*4882a593Smuzhiyun void dhd_schedule_edl_work(dhd_pub_t *dhdp, uint delay_ms);
2761*4882a593Smuzhiyun #else
2762*4882a593Smuzhiyun static INLINE void dhd_schedule_edl_work(dhd_pub_t *dhd, uint delay_ms) { return; }
2763*4882a593Smuzhiyun #endif /* linux || LINUX */
2764*4882a593Smuzhiyun #endif /* EWP_EDL */
2765*4882a593Smuzhiyun 
2766*4882a593Smuzhiyun #ifdef SUPPORT_AP_POWERSAVE
2767*4882a593Smuzhiyun extern int dhd_set_ap_powersave(dhd_pub_t *dhdp, int ifidx, int enable);
2768*4882a593Smuzhiyun #endif /* SUPPORT_AP_POWERSAVE */
2769*4882a593Smuzhiyun 
2770*4882a593Smuzhiyun #ifdef PKT_FILTER_SUPPORT
2771*4882a593Smuzhiyun #define DHD_UNICAST_FILTER_NUM		0
2772*4882a593Smuzhiyun #define DHD_BROADCAST_FILTER_NUM	1
2773*4882a593Smuzhiyun #define DHD_MULTICAST4_FILTER_NUM	2
2774*4882a593Smuzhiyun #define DHD_MULTICAST6_FILTER_NUM	3
2775*4882a593Smuzhiyun #define DHD_MDNS_FILTER_NUM		4
2776*4882a593Smuzhiyun #define DHD_ARP_FILTER_NUM		5
2777*4882a593Smuzhiyun #define DHD_BROADCAST_ARP_FILTER_NUM	6
2778*4882a593Smuzhiyun #define DHD_IP4BCAST_DROP_FILTER_NUM	7
2779*4882a593Smuzhiyun #define DHD_LLC_STP_DROP_FILTER_NUM	8
2780*4882a593Smuzhiyun #define DHD_LLC_XID_DROP_FILTER_NUM	9
2781*4882a593Smuzhiyun #define DHD_UDPNETBIOS_DROP_FILTER_NUM	10
2782*4882a593Smuzhiyun #define DISCARD_IPV4_MCAST	"102 1 6 IP4_H:16 0xf0 0xe0"
2783*4882a593Smuzhiyun #define DISCARD_IPV6_MCAST	"103 1 6 IP6_H:24 0xff 0xff"
2784*4882a593Smuzhiyun #define DISCARD_IPV4_BCAST	"107 1 6 IP4_H:16 0xffffffff 0xffffffff"
2785*4882a593Smuzhiyun #define DISCARD_LLC_STP		"108 1 6 ETH_H:14 0xFFFFFFFFFFFF 0xAAAA0300000C"
2786*4882a593Smuzhiyun #define DISCARD_LLC_XID		"109 1 6 ETH_H:14 0xFFFFFF 0x0001AF"
2787*4882a593Smuzhiyun #define DISCARD_UDPNETBIOS	"110 1 6 UDP_H:2 0xffff 0x0089"
2788*4882a593Smuzhiyun extern int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val);
2789*4882a593Smuzhiyun extern void dhd_enable_packet_filter(int value, dhd_pub_t *dhd);
2790*4882a593Smuzhiyun extern int dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num);
2791*4882a593Smuzhiyun extern int net_os_enable_packet_filter(struct net_device *dev, int val);
2792*4882a593Smuzhiyun extern int net_os_rxfilter_add_remove(struct net_device *dev, int val, int num);
2793*4882a593Smuzhiyun extern int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val);
2794*4882a593Smuzhiyun 
2795*4882a593Smuzhiyun #define MAX_PKTFLT_BUF_SIZE		2048
2796*4882a593Smuzhiyun #define MAX_PKTFLT_FIXED_PATTERN_SIZE	32
2797*4882a593Smuzhiyun #define MAX_PKTFLT_FIXED_BUF_SIZE	\
2798*4882a593Smuzhiyun 	(WL_PKT_FILTER_FIXED_LEN + MAX_PKTFLT_FIXED_PATTERN_SIZE * 2)
2799*4882a593Smuzhiyun #define MAXPKT_ARG	16
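/*
 * Hedged sketch of installing and arming one of the predefined discard filters
 * above (the real sequencing is done by dhd_enable_packet_filter() around
 * suspend/resume; the pktfilter[] field is an assumption for illustration):
 *
 *	dhd->pktfilter[DHD_LLC_STP_DROP_FILTER_NUM] = DISCARD_LLC_STP;
 *	dhd_packet_filter_add_remove(dhd, TRUE, DHD_LLC_STP_DROP_FILTER_NUM);
 *	dhd_os_enable_packet_filter(dhd, 1);    // arm all installed filters
 */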
2800*4882a593Smuzhiyun #endif /* PKT_FILTER_SUPPORT */
2801*4882a593Smuzhiyun 
2802*4882a593Smuzhiyun #if defined(OEM_ANDROID) && defined(BCMPCIE)
2803*4882a593Smuzhiyun extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval);
2804*4882a593Smuzhiyun #else
2805*4882a593Smuzhiyun extern int dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd);
2806*4882a593Smuzhiyun #endif /* OEM_ANDROID && BCMPCIE */
2807*4882a593Smuzhiyun 
2808*4882a593Smuzhiyun extern bool dhd_support_sta_mode(dhd_pub_t *dhd);
2809*4882a593Smuzhiyun extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size);
2810*4882a593Smuzhiyun 
2811*4882a593Smuzhiyun #ifdef RSSI_MONITOR_SUPPORT
2812*4882a593Smuzhiyun extern int dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start,
2813*4882a593Smuzhiyun              int8 max_rssi, int8 min_rssi);
2814*4882a593Smuzhiyun #endif /* RSSI_MONITOR_SUPPORT */
2815*4882a593Smuzhiyun 
2816*4882a593Smuzhiyun #ifdef DHDTCPACK_SUPPRESS
2817*4882a593Smuzhiyun int dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable);
2818*4882a593Smuzhiyun #endif /* DHDTCPACK_SUPPRESS */
2819*4882a593Smuzhiyun 
2820*4882a593Smuzhiyun #define DHD_RSSI_MONITOR_EVT_VERSION   1
2821*4882a593Smuzhiyun typedef struct {
2822*4882a593Smuzhiyun 	uint8 version;
2823*4882a593Smuzhiyun 	int8 cur_rssi;
2824*4882a593Smuzhiyun 	struct ether_addr BSSID;
2825*4882a593Smuzhiyun } dhd_rssi_monitor_evt_t;
2826*4882a593Smuzhiyun 
2827*4882a593Smuzhiyun typedef struct {
2828*4882a593Smuzhiyun 	uint32 limit;		/* Expiration time (usec) */
2829*4882a593Smuzhiyun 	uint32 increment;	/* Current expiration increment (usec) */
2830*4882a593Smuzhiyun 	uint32 elapsed;		/* Current elapsed time (usec) */
2831*4882a593Smuzhiyun 	uint32 tick;		/* O/S tick time (usec) */
2832*4882a593Smuzhiyun } dhd_timeout_t;
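/*
 * dhd_timeout_t is used with dhd_timeout_start()/dhd_timeout_expired()
 * (declared further below) as a simple poll loop with a time budget. Sketch
 * only; is_cond_ready() is a hypothetical predicate:
 *
 *	dhd_timeout_t tmo;
 *	dhd_timeout_start(&tmo, 2000000);                   // 2 s budget, in usec
 *	while (!is_cond_ready(dhd) && !dhd_timeout_expired(&tmo))
 *		;                       // dhd_timeout_expired() advances the elapsed time
 *	if (!is_cond_ready(dhd))
 *		return BCME_NOTREADY;
 */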
2833*4882a593Smuzhiyun 
2834*4882a593Smuzhiyun #ifdef SHOW_LOGTRACE
2835*4882a593Smuzhiyun typedef struct {
2836*4882a593Smuzhiyun 	uint  num_fmts;
2837*4882a593Smuzhiyun 	char **fmts;
2838*4882a593Smuzhiyun 	char *raw_fmts;
2839*4882a593Smuzhiyun 	char *raw_sstr;
2840*4882a593Smuzhiyun 	uint32 fmts_size;
2841*4882a593Smuzhiyun 	uint32 raw_fmts_size;
2842*4882a593Smuzhiyun 	uint32 raw_sstr_size;
2843*4882a593Smuzhiyun 	uint32 ramstart;
2844*4882a593Smuzhiyun 	uint32 rodata_start;
2845*4882a593Smuzhiyun 	uint32 rodata_end;
2846*4882a593Smuzhiyun 	char *rom_raw_sstr;
2847*4882a593Smuzhiyun 	uint32 rom_raw_sstr_size;
2848*4882a593Smuzhiyun 	uint32 rom_ramstart;
2849*4882a593Smuzhiyun 	uint32 rom_rodata_start;
2850*4882a593Smuzhiyun 	uint32 rom_rodata_end;
2851*4882a593Smuzhiyun } dhd_event_log_t;
2852*4882a593Smuzhiyun #endif /* SHOW_LOGTRACE */
2853*4882a593Smuzhiyun 
2854*4882a593Smuzhiyun #if defined(PKT_FILTER_SUPPORT) && defined(APF)
2855*4882a593Smuzhiyun /*
2856*4882a593Smuzhiyun  * As per Google's current implementation, there is only one APF filter.
2857*4882a593Smuzhiyun  * Userspace therefore does not track a filter id, so DHD has to manage
2858*4882a593Smuzhiyun  * the filter id itself.
2859*4882a593Smuzhiyun  */
2860*4882a593Smuzhiyun #define PKT_FILTER_APF_ID		200
2861*4882a593Smuzhiyun #define DHD_APF_LOCK(ndev)		dhd_apf_lock(ndev)
2862*4882a593Smuzhiyun #define DHD_APF_UNLOCK(ndev)		dhd_apf_unlock(ndev)
2863*4882a593Smuzhiyun 
2864*4882a593Smuzhiyun extern void dhd_apf_lock(struct net_device *dev);
2865*4882a593Smuzhiyun extern void dhd_apf_unlock(struct net_device *dev);
2866*4882a593Smuzhiyun extern int dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version);
2867*4882a593Smuzhiyun extern int dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len);
2868*4882a593Smuzhiyun extern int dhd_dev_apf_add_filter(struct net_device *ndev, u8* program,
2869*4882a593Smuzhiyun 	uint32 program_len);
2870*4882a593Smuzhiyun extern int dhd_dev_apf_enable_filter(struct net_device *ndev);
2871*4882a593Smuzhiyun extern int dhd_dev_apf_disable_filter(struct net_device *ndev);
2872*4882a593Smuzhiyun extern int dhd_dev_apf_delete_filter(struct net_device *ndev);
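/*
 * Hedged ordering sketch for the APF hooks above (typically driven from the
 * Android HAL through the cfg80211 vendor interface; program/program_len are
 * hypothetical inputs):
 *
 *	uint32 ver, max_len;
 *	DHD_APF_LOCK(ndev);
 *	dhd_dev_apf_get_version(ndev, &ver);
 *	dhd_dev_apf_get_max_len(ndev, &max_len);
 *	if (program_len <= max_len) {
 *		dhd_dev_apf_add_filter(ndev, program, program_len);
 *		dhd_dev_apf_enable_filter(ndev);
 *	}
 *	DHD_APF_UNLOCK(ndev);
 */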
2873*4882a593Smuzhiyun #endif /* PKT_FILTER_SUPPORT && APF */
2874*4882a593Smuzhiyun 
2875*4882a593Smuzhiyun extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec);
2876*4882a593Smuzhiyun extern int dhd_timeout_expired(dhd_timeout_t *tmo);
2877*4882a593Smuzhiyun 
2878*4882a593Smuzhiyun extern int dhd_ifname2idx(struct dhd_info *dhd, char *name);
2879*4882a593Smuzhiyun #ifdef LINUX
2880*4882a593Smuzhiyun extern int dhd_net2idx(struct dhd_info *dhd, struct net_device *net);
2881*4882a593Smuzhiyun extern struct net_device * dhd_idx2net(void *pub, int ifidx);
2882*4882a593Smuzhiyun extern int net_os_send_hang_message(struct net_device *dev);
2883*4882a593Smuzhiyun extern int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num);
2884*4882a593Smuzhiyun #endif
2885*4882a593Smuzhiyun extern bool dhd_wowl_cap(void *bus);
2886*4882a593Smuzhiyun extern int wl_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata, uint pktlen,
2887*4882a593Smuzhiyun 	wl_event_msg_t *, void **data_ptr,  void *);
2888*4882a593Smuzhiyun extern int wl_process_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata, uint pktlen,
2889*4882a593Smuzhiyun 	wl_event_msg_t *, void **data_ptr,  void *);
2890*4882a593Smuzhiyun extern void wl_event_to_host_order(wl_event_msg_t * evt);
2891*4882a593Smuzhiyun extern int wl_host_event_get_data(void *pktdata, uint pktlen, bcm_event_msg_u_t *evu);
2892*4882a593Smuzhiyun extern int dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len);
2893*4882a593Smuzhiyun extern int dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set,
2894*4882a593Smuzhiyun                             int ifindex);
2895*4882a593Smuzhiyun extern int dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval,
2896*4882a593Smuzhiyun 	int cmd, uint8 set, int ifidx);
2897*4882a593Smuzhiyun extern int dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val,
2898*4882a593Smuzhiyun 	int cmd, uint8 set, int ifidx);
2899*4882a593Smuzhiyun extern void dhd_common_init(osl_t *osh);
2900*4882a593Smuzhiyun 
2901*4882a593Smuzhiyun #if defined(linux) || defined(LINUX) || defined(OEM_ANDROID)
2902*4882a593Smuzhiyun extern int dhd_do_driver_init(struct net_device *net);
2903*4882a593Smuzhiyun #endif
2904*4882a593Smuzhiyun extern int dhd_event_ifadd(struct dhd_info *dhd, struct wl_event_data_if *ifevent,
2905*4882a593Smuzhiyun 	char *name, uint8 *mac);
2906*4882a593Smuzhiyun extern int dhd_event_ifdel(struct dhd_info *dhd, struct wl_event_data_if *ifevent,
2907*4882a593Smuzhiyun 	char *name, uint8 *mac);
2908*4882a593Smuzhiyun extern int dhd_event_ifchange(struct dhd_info *dhd, struct wl_event_data_if *ifevent,
2909*4882a593Smuzhiyun        char *name, uint8 *mac);
2910*4882a593Smuzhiyun #ifdef DHD_UPDATE_INTF_MAC
2911*4882a593Smuzhiyun extern int dhd_op_if_update(dhd_pub_t *dhdpub, int ifidx);
2912*4882a593Smuzhiyun #endif /* DHD_UPDATE_INTF_MAC */
2913*4882a593Smuzhiyun extern struct net_device* dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name,
2914*4882a593Smuzhiyun 	uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name);
2915*4882a593Smuzhiyun extern int dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock);
2916*4882a593Smuzhiyun #ifdef WL_STATIC_IF
2917*4882a593Smuzhiyun extern s32 dhd_update_iflist_info(dhd_pub_t *dhdp, struct net_device *ndev, int ifidx,
2918*4882a593Smuzhiyun 	uint8 *mac, uint8 bssidx, const char *dngl_name, int if_state);
2919*4882a593Smuzhiyun #endif /* WL_STATIC_IF */
2920*4882a593Smuzhiyun extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name);
2921*4882a593Smuzhiyun extern void dhd_vif_del(struct dhd_info *dhd, int ifidx);
2922*4882a593Smuzhiyun extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx);
2923*4882a593Smuzhiyun extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, uchar *cp, int len);
2924*4882a593Smuzhiyun 
2925*4882a593Smuzhiyun #ifdef WL_NATOE
2926*4882a593Smuzhiyun extern int dhd_natoe_ct_event(dhd_pub_t *dhd, char *data);
2927*4882a593Smuzhiyun #endif /* WL_NATOE */
2928*4882a593Smuzhiyun 
2929*4882a593Smuzhiyun /* Send packet to dongle via data channel */
2930*4882a593Smuzhiyun extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt);
2931*4882a593Smuzhiyun 
2932*4882a593Smuzhiyun /* send up locally generated event */
2933*4882a593Smuzhiyun extern void dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
2934*4882a593Smuzhiyun /* Send event to host */
2935*4882a593Smuzhiyun extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
2936*4882a593Smuzhiyun #ifdef LOG_INTO_TCPDUMP
2937*4882a593Smuzhiyun extern void dhd_sendup_log(dhd_pub_t *dhdp, void *data, int len);
2938*4882a593Smuzhiyun #endif /* LOG_INTO_TCPDUMP */
2939*4882a593Smuzhiyun #if defined(SHOW_LOGTRACE) && defined(EWP_EDL)
2940*4882a593Smuzhiyun void dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg);
2941*4882a593Smuzhiyun #endif
2942*4882a593Smuzhiyun #if defined(WIFI_TURNON_USE_HALINIT)
2943*4882a593Smuzhiyun extern int dhd_open(struct net_device *net);
2944*4882a593Smuzhiyun #endif /* WIFI_TURNON_USE_HALINIT */
2945*4882a593Smuzhiyun extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag);
2946*4882a593Smuzhiyun extern uint dhd_bus_status(dhd_pub_t *dhdp);
2947*4882a593Smuzhiyun extern int  dhd_bus_start(dhd_pub_t *dhdp);
2948*4882a593Smuzhiyun extern int dhd_bus_suspend(dhd_pub_t *dhdpub);
2949*4882a593Smuzhiyun extern int dhd_bus_resume(dhd_pub_t *dhdpub, int stage);
2950*4882a593Smuzhiyun extern int dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size);
2951*4882a593Smuzhiyun extern void dhd_print_buf(void *pbuf, int len, int bytes_per_line);
2952*4882a593Smuzhiyun extern bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval);
2953*4882a593Smuzhiyun #if defined(BCMSDIO) || defined(BCMPCIE)
2954*4882a593Smuzhiyun extern uint dhd_bus_chip_id(dhd_pub_t *dhdp);
2955*4882a593Smuzhiyun extern uint dhd_bus_chiprev_id(dhd_pub_t *dhdp);
2956*4882a593Smuzhiyun extern uint dhd_bus_chippkg_id(dhd_pub_t *dhdp);
2957*4882a593Smuzhiyun #endif /* defined(BCMSDIO) || defined(BCMPCIE) */
2958*4882a593Smuzhiyun #if defined(LINUX) || defined(linux)
2959*4882a593Smuzhiyun int dhd_bus_get_fw_mode(dhd_pub_t *dhdp);
2960*4882a593Smuzhiyun #else
2961*4882a593Smuzhiyun static INLINE int dhd_bus_get_fw_mode(dhd_pub_t *dhdp) { return 0; }
2962*4882a593Smuzhiyun #endif /* LINUX || linux */
2963*4882a593Smuzhiyun 
2964*4882a593Smuzhiyun #if defined(KEEP_ALIVE)
2965*4882a593Smuzhiyun extern int dhd_keep_alive_onoff(dhd_pub_t *dhd);
2966*4882a593Smuzhiyun #endif /* KEEP_ALIVE */
2967*4882a593Smuzhiyun 
2968*4882a593Smuzhiyun /* linux is defined for DHD EFI builds as well,
2969*4882a593Smuzhiyun * since EFI is cross-compiled from linux.
2970*4882a593Smuzhiyun * The dbgring_lock APIs are meant only for linux,
2971*4882a593Smuzhiyun * where they use mutexes; other OSes continue to
2972*4882a593Smuzhiyun * use osl_spin_lock.
2973*4882a593Smuzhiyun */
2974*4882a593Smuzhiyun #if (defined(LINUX) || defined(linux)) && !defined(DHD_EFI)
2975*4882a593Smuzhiyun void *dhd_os_dbgring_lock_init(osl_t *osh);
2976*4882a593Smuzhiyun void dhd_os_dbgring_lock_deinit(osl_t *osh, void *mtx);
2977*4882a593Smuzhiyun unsigned long dhd_os_dbgring_lock(void *lock);
2978*4882a593Smuzhiyun void dhd_os_dbgring_unlock(void *lock, unsigned long flags);
2979*4882a593Smuzhiyun #endif /* (LINUX || linux) && !DHD_EFI */
2980*4882a593Smuzhiyun 
2981*4882a593Smuzhiyun #ifdef PCIE_INB_DW
2982*4882a593Smuzhiyun #ifdef DHD_EFI
2983*4882a593Smuzhiyun extern int dhd_os_ds_enter_wait(dhd_pub_t * pub, uint * condition);
2984*4882a593Smuzhiyun extern int dhd_os_ds_enter_wake(dhd_pub_t * pub);
2985*4882a593Smuzhiyun #else
2986*4882a593Smuzhiyun static INLINE int dhd_os_ds_enter_wait(dhd_pub_t * pub, uint * condition)
2987*4882a593Smuzhiyun { return 1; }
2988*4882a593Smuzhiyun static INLINE int dhd_os_ds_enter_wake(dhd_pub_t * pub)
2989*4882a593Smuzhiyun { return 0; }
2990*4882a593Smuzhiyun #endif /* DHD_EFI */
2991*4882a593Smuzhiyun #endif /* PCIE_INB_DW */
2992*4882a593Smuzhiyun 
2993*4882a593Smuzhiyun #if defined(LINUX) || defined(linux) || defined(DHD_EFI)
2994*4882a593Smuzhiyun extern int dhd_os_busbusy_wait_negation(dhd_pub_t * pub, uint * condition);
2995*4882a593Smuzhiyun extern int dhd_os_busbusy_wake(dhd_pub_t * pub);
2996*4882a593Smuzhiyun extern void dhd_os_tx_completion_wake(dhd_pub_t *dhd);
2997*4882a593Smuzhiyun extern int dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition);
2998*4882a593Smuzhiyun extern int dhd_os_d3ack_wait(dhd_pub_t * pub, uint * condition);
2999*4882a593Smuzhiyun extern int dhd_os_d3ack_wake(dhd_pub_t * pub);
3000*4882a593Smuzhiyun extern int dhd_os_dmaxfer_wait(dhd_pub_t *pub, uint *condition);
3001*4882a593Smuzhiyun extern int dhd_os_dmaxfer_wake(dhd_pub_t *pub);
3002*4882a593Smuzhiyun int dhd_os_busbusy_wait_bitmask(dhd_pub_t *pub, uint *var,
3003*4882a593Smuzhiyun 		uint bitmask, uint condition);
3004*4882a593Smuzhiyun #ifdef PCIE_INB_DW
3005*4882a593Smuzhiyun extern int dhd_os_ds_exit_wait(dhd_pub_t * pub, uint * condition);
3006*4882a593Smuzhiyun extern int dhd_os_ds_exit_wake(dhd_pub_t * pub);
3007*4882a593Smuzhiyun #endif /* PCIE_INB_DW */
3008*4882a593Smuzhiyun int dhd_os_tput_test_wait(dhd_pub_t *pub, uint *condition, uint timeout_ms);
3009*4882a593Smuzhiyun int dhd_os_tput_test_wake(dhd_pub_t * pub);
3010*4882a593Smuzhiyun #else
3011*4882a593Smuzhiyun static INLINE int dhd_os_tput_test_wait(dhd_pub_t *pub, uint *condition,
3012*4882a593Smuzhiyun 		uint timeout_ms)
3013*4882a593Smuzhiyun { return 0; }
3014*4882a593Smuzhiyun static INLINE int dhd_os_tput_test_wake(dhd_pub_t * pub)
3015*4882a593Smuzhiyun { return 0; }
3016*4882a593Smuzhiyun static INLINE int dhd_os_d3ack_wait(dhd_pub_t * pub, uint * condition)
3017*4882a593Smuzhiyun { return dhd_os_ioctl_resp_wait(pub, condition); }
3018*4882a593Smuzhiyun static INLINE int dhd_os_d3ack_wake(dhd_pub_t * pub)
3019*4882a593Smuzhiyun { return dhd_os_ioctl_resp_wake(pub); }
3020*4882a593Smuzhiyun #ifdef PCIE_INB_DW
3021*4882a593Smuzhiyun static INLINE int dhd_os_ds_exit_wait(dhd_pub_t * pub, uint * condition)
3022*4882a593Smuzhiyun { DHD_ERROR(("%s is Not supported for this platform", __FUNCTION__)); return 0; }
3023*4882a593Smuzhiyun static INLINE int dhd_os_ds_exit_wake(dhd_pub_t * pub)
3024*4882a593Smuzhiyun { DHD_ERROR(("%s is Not supported for this platform", __FUNCTION__)); return 0; }
3025*4882a593Smuzhiyun #endif /* PCIE_INB_DW */
3026*4882a593Smuzhiyun static INLINE int dhd_os_busbusy_wait_negation(dhd_pub_t * pub, uint * condition)
3027*4882a593Smuzhiyun { return 1; }
3028*4882a593Smuzhiyun static INLINE int dhd_os_busbusy_wake(dhd_pub_t * pub)
3029*4882a593Smuzhiyun { return 0; }
3030*4882a593Smuzhiyun static INLINE int dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition)
3031*4882a593Smuzhiyun { return 0; }
3032*4882a593Smuzhiyun static INLINE int dhd_os_dmaxfer_wait(dhd_pub_t *pub, uint *condition)
3033*4882a593Smuzhiyun { return 0; }
3034*4882a593Smuzhiyun static INLINE int dhd_os_dmaxfer_wake(dhd_pub_t *pub)
3035*4882a593Smuzhiyun { return 0; }
3036*4882a593Smuzhiyun static INLINE int dhd_os_busbusy_wait_bitmask(dhd_pub_t *pub, uint *var,
3037*4882a593Smuzhiyun 		uint bitmask, uint condition)
3038*4882a593Smuzhiyun { return 0; }
3039*4882a593Smuzhiyun #endif /* LINUX || DHD_EFI */
3040*4882a593Smuzhiyun 
3041*4882a593Smuzhiyun #if defined(LINUX) || defined(linux)
3042*4882a593Smuzhiyun /*
3043*4882a593Smuzhiyun  * Manage sta objects in an interface. An interface is identified by its ifindex, and
3044*4882a593Smuzhiyun  * the sta(s) within an interface are managed using the MAC address of the sta.
3045*4882a593Smuzhiyun  */
3046*4882a593Smuzhiyun struct dhd_sta;
3047*4882a593Smuzhiyun extern bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac);
3048*4882a593Smuzhiyun extern struct dhd_sta *dhd_find_sta(void *pub, int ifidx, void *ea);
3049*4882a593Smuzhiyun extern struct dhd_sta *dhd_findadd_sta(void *pub, int ifidx, void *ea);
3050*4882a593Smuzhiyun extern void dhd_del_all_sta(void *pub, int ifidx);
3051*4882a593Smuzhiyun extern void dhd_del_sta(void *pub, int ifidx, void *ea);
3052*4882a593Smuzhiyun extern int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx);
3053*4882a593Smuzhiyun extern int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val);
3054*4882a593Smuzhiyun extern int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx);
3055*4882a593Smuzhiyun extern struct net_device *dhd_linux_get_primary_netdev(dhd_pub_t *dhdp);
3056*4882a593Smuzhiyun #else /* LINUX */
3057*4882a593Smuzhiyun static INLINE bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
3058*4882a593Smuzhiyun { return FALSE;}
3059*4882a593Smuzhiyun static INLINE void* dhd_find_sta(void *pub, int ifidx, void *ea) { return NULL;}
3060*4882a593Smuzhiyun static INLINE void *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
3061*4882a593Smuzhiyun static INLINE void dhd_del_all_sta(void *pub, int ifidx) { }
3062*4882a593Smuzhiyun static INLINE void dhd_del_sta(void *pub, int ifidx, void *ea) { }
3063*4882a593Smuzhiyun static INLINE int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx) { return 0; }
3064*4882a593Smuzhiyun static INLINE int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val) { return 0; }
3065*4882a593Smuzhiyun static INLINE int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx) { return 0; }
3066*4882a593Smuzhiyun #endif /* LINUX */
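/*
 * Illustrative sketch of how the per-interface sta list is typically used
 * (the error handling shown is hypothetical; 'ea' is the station's ethernet
 * address taken from the frame or event):
 *
 *	struct dhd_sta *sta = dhd_findadd_sta(dhdp, ifidx, ea);
 *	if (sta == NULL)
 *		return BCME_NOMEM;
 *	...
 *	dhd_del_sta(dhdp, ifidx, ea);		// on disassoc of that station
 *	dhd_del_all_sta(dhdp, ifidx);		// on interface teardown
 */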
3067*4882a593Smuzhiyun 
3068*4882a593Smuzhiyun extern bool dhd_is_concurrent_mode(dhd_pub_t *dhd);
3069*4882a593Smuzhiyun int dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len,
3070*4882a593Smuzhiyun 		char *res_buf, uint res_len, bool set);
3071*4882a593Smuzhiyun extern int dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf,
3072*4882a593Smuzhiyun 		uint cmd_len, char **resptr, uint resp_len);
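/*
 * Illustrative sketch: dhd_iovar() is the buffer based interface. A GET passes
 * any parameter in param_buf and receives the result in res_buf; a SET passes
 * the value in param_buf with set=TRUE. The iovar name "roam_off" is only an
 * example.
 *
 *	uint32 roam_off_val = 0;
 *	int err = dhd_iovar(pub, 0, "roam_off", NULL, 0,
 *		(char *)&roam_off_val, sizeof(roam_off_val), FALSE);	// GET
 *	roam_off_val = 1;
 *	err = dhd_iovar(pub, 0, "roam_off", (char *)&roam_off_val,
 *		sizeof(roam_off_val), NULL, 0, TRUE);			// SET
 */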
3073*4882a593Smuzhiyun 
3074*4882a593Smuzhiyun #ifdef DHD_MCAST_REGEN
3075*4882a593Smuzhiyun extern int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx);
3076*4882a593Smuzhiyun extern int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val);
3077*4882a593Smuzhiyun #endif
3078*4882a593Smuzhiyun typedef enum cust_gpio_modes {
3079*4882a593Smuzhiyun 	WLAN_RESET_ON,
3080*4882a593Smuzhiyun 	WLAN_RESET_OFF,
3081*4882a593Smuzhiyun 	WLAN_POWER_ON,
3082*4882a593Smuzhiyun 	WLAN_POWER_OFF
3083*4882a593Smuzhiyun } cust_gpio_modes_t;
3084*4882a593Smuzhiyun 
3085*4882a593Smuzhiyun typedef struct dmaxref_mem_map {
3086*4882a593Smuzhiyun 	dhd_dma_buf_t *srcmem;
3087*4882a593Smuzhiyun 	dhd_dma_buf_t *dstmem;
3088*4882a593Smuzhiyun } dmaxref_mem_map_t;
3089*4882a593Smuzhiyun 
3090*4882a593Smuzhiyun #if defined(OEM_ANDROID)
3091*4882a593Smuzhiyun extern int wl_iw_iscan_set_scan_broadcast_prep(struct net_device *dev, uint flag);
3092*4882a593Smuzhiyun extern int wl_iw_send_priv_event(struct net_device *dev, char *flag);
3093*4882a593Smuzhiyun #endif /* defined(OEM_ANDROID) */
3094*4882a593Smuzhiyun 
3095*4882a593Smuzhiyun #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
3096*4882a593Smuzhiyun extern void dhd_flush_rx_tx_wq(dhd_pub_t *dhdp);
3097*4882a593Smuzhiyun #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
3098*4882a593Smuzhiyun 
3099*4882a593Smuzhiyun /*
3100*4882a593Smuzhiyun  * Insmod parameters for debug/test
3101*4882a593Smuzhiyun  */
3102*4882a593Smuzhiyun 
3103*4882a593Smuzhiyun /* Watchdog timer interval */
3104*4882a593Smuzhiyun extern uint dhd_watchdog_ms;
3105*4882a593Smuzhiyun extern bool dhd_os_wd_timer_enabled(void *bus);
3106*4882a593Smuzhiyun #ifdef DHD_PCIE_RUNTIMEPM
3107*4882a593Smuzhiyun extern uint dhd_runtimepm_ms;
3108*4882a593Smuzhiyun #endif /* DHD_PCIE_RUNTIMEPM */
3109*4882a593Smuzhiyun 
3110*4882a593Smuzhiyun /** Default console output poll interval */
3111*4882a593Smuzhiyun extern uint dhd_console_ms;
3112*4882a593Smuzhiyun 
3113*4882a593Smuzhiyun extern uint android_msg_level;
3114*4882a593Smuzhiyun extern uint config_msg_level;
3115*4882a593Smuzhiyun extern uint sd_msglevel;
3116*4882a593Smuzhiyun extern uint dump_msg_level;
3117*4882a593Smuzhiyun #ifdef BCMDBUS
3118*4882a593Smuzhiyun extern uint dbus_msglevel;
3119*4882a593Smuzhiyun #endif /* BCMDBUS */
3120*4882a593Smuzhiyun #ifdef WL_WIRELESS_EXT
3121*4882a593Smuzhiyun extern uint iw_msg_level;
3122*4882a593Smuzhiyun #endif
3123*4882a593Smuzhiyun #ifdef WL_CFG80211
3124*4882a593Smuzhiyun extern uint wl_dbg_level;
3125*4882a593Smuzhiyun #endif
3126*4882a593Smuzhiyun 
3127*4882a593Smuzhiyun extern uint dhd_slpauto;
3128*4882a593Smuzhiyun 
3129*4882a593Smuzhiyun /* Use interrupts */
3130*4882a593Smuzhiyun extern uint dhd_intr;
3131*4882a593Smuzhiyun 
3132*4882a593Smuzhiyun /* Use polling */
3133*4882a593Smuzhiyun extern uint dhd_poll;
3134*4882a593Smuzhiyun 
3135*4882a593Smuzhiyun /* ARP offload agent mode */
3136*4882a593Smuzhiyun extern uint dhd_arp_mode;
3137*4882a593Smuzhiyun 
3138*4882a593Smuzhiyun /* Pkt filter enable control */
3139*4882a593Smuzhiyun extern uint dhd_pkt_filter_enable;
3140*4882a593Smuzhiyun 
3141*4882a593Smuzhiyun /*  Pkt filter init setup */
3142*4882a593Smuzhiyun extern uint dhd_pkt_filter_init;
3143*4882a593Smuzhiyun 
3144*4882a593Smuzhiyun /* Pkt filter mode control */
3145*4882a593Smuzhiyun extern uint dhd_master_mode;
3146*4882a593Smuzhiyun 
3147*4882a593Smuzhiyun /* Roaming mode control */
3148*4882a593Smuzhiyun extern uint dhd_roam_disable;
3149*4882a593Smuzhiyun 
3150*4882a593Smuzhiyun /* Radio up/down control */
3151*4882a593Smuzhiyun extern uint dhd_radio_up;
3152*4882a593Smuzhiyun 
3153*4882a593Smuzhiyun /* TCM verification control */
3154*4882a593Smuzhiyun extern uint dhd_tcm_test_enable;
3155*4882a593Smuzhiyun 
3156*4882a593Smuzhiyun /* Initial idletime ticks (may be -1 for immediate idle, 0 for no idle) */
3157*4882a593Smuzhiyun extern int dhd_idletime;
3158*4882a593Smuzhiyun #ifdef DHD_USE_IDLECOUNT
3159*4882a593Smuzhiyun #define DHD_IDLETIME_TICKS 5
3160*4882a593Smuzhiyun #else
3161*4882a593Smuzhiyun #define DHD_IDLETIME_TICKS 1
3162*4882a593Smuzhiyun #endif /* DHD_USE_IDLECOUNT */
3163*4882a593Smuzhiyun 
3164*4882a593Smuzhiyun /* SDIO Drive Strength */
3165*4882a593Smuzhiyun extern uint dhd_sdiod_drive_strength;
3166*4882a593Smuzhiyun 
3167*4882a593Smuzhiyun /* triggers bcm_bprintf to print to kernel log */
3168*4882a593Smuzhiyun extern bool bcm_bprintf_bypass;
3169*4882a593Smuzhiyun 
3170*4882a593Smuzhiyun /* Override to force tx queueing all the time */
3171*4882a593Smuzhiyun extern uint dhd_force_tx_queueing;
3172*4882a593Smuzhiyun 
3173*4882a593Smuzhiyun /* Default bcn_timeout value is 4 */
3174*4882a593Smuzhiyun #define DEFAULT_BCN_TIMEOUT_VALUE	4
3175*4882a593Smuzhiyun #ifndef CUSTOM_BCN_TIMEOUT_SETTING
3176*4882a593Smuzhiyun #define CUSTOM_BCN_TIMEOUT_SETTING	DEFAULT_BCN_TIMEOUT_VALUE
3177*4882a593Smuzhiyun #endif
3178*4882a593Smuzhiyun 
3179*4882a593Smuzhiyun /* Default KEEP_ALIVE period is 55 sec, to prevent the AP from sending keep-alive probe frames */
3180*4882a593Smuzhiyun #define DEFAULT_KEEP_ALIVE_VALUE 	55000 /* msec */
3181*4882a593Smuzhiyun #ifndef CUSTOM_KEEP_ALIVE_SETTING
3182*4882a593Smuzhiyun #define CUSTOM_KEEP_ALIVE_SETTING 	DEFAULT_KEEP_ALIVE_VALUE
3183*4882a593Smuzhiyun #endif /* DEFAULT_KEEP_ALIVE_VALUE */
3184*4882a593Smuzhiyun 
3185*4882a593Smuzhiyun #define NULL_PKT_STR	"null_pkt"
3186*4882a593Smuzhiyun 
3187*4882a593Smuzhiyun /* hooks for custom glom setting option via Makefile */
3188*4882a593Smuzhiyun #define DEFAULT_GLOM_VALUE 	-1
3189*4882a593Smuzhiyun #ifndef CUSTOM_GLOM_SETTING
3190*4882a593Smuzhiyun #define CUSTOM_GLOM_SETTING 	DEFAULT_GLOM_VALUE
3191*4882a593Smuzhiyun #endif
3192*4882a593Smuzhiyun #define WL_AUTO_ROAM_TRIGGER -75
3193*4882a593Smuzhiyun /* hooks for custom Roaming Trigger setting via Makefile */
3194*4882a593Smuzhiyun #define DEFAULT_ROAM_TRIGGER_VALUE -75 /* dBm default roam trigger all band */
3195*4882a593Smuzhiyun #define DEFAULT_ROAM_TRIGGER_SETTING 	-1
3196*4882a593Smuzhiyun #ifndef CUSTOM_ROAM_TRIGGER_SETTING
3197*4882a593Smuzhiyun #define CUSTOM_ROAM_TRIGGER_SETTING 	DEFAULT_ROAM_TRIGGER_VALUE
3198*4882a593Smuzhiyun #endif
3199*4882a593Smuzhiyun 
3200*4882a593Smuzhiyun /* hooks for custom Roam Delta setting via Makefile */
3201*4882a593Smuzhiyun #define DEFAULT_ROAM_DELTA_VALUE  10 /* dBm default roam delta all band */
3202*4882a593Smuzhiyun #define DEFAULT_ROAM_DELTA_SETTING 	-1
3203*4882a593Smuzhiyun #ifndef CUSTOM_ROAM_DELTA_SETTING
3204*4882a593Smuzhiyun #define CUSTOM_ROAM_DELTA_SETTING 	DEFAULT_ROAM_DELTA_VALUE
3205*4882a593Smuzhiyun #endif
3206*4882a593Smuzhiyun 
3207*4882a593Smuzhiyun /* hooks for custom PNO Event wake lock to guarantee enough time
3208*4882a593Smuzhiyun 	for the platform to detect the event before the system is suspended
3209*4882a593Smuzhiyun */
3210*4882a593Smuzhiyun #define DEFAULT_PNO_EVENT_LOCK_xTIME 	2 	/* multiple of DHD_PACKET_TIMEOUT_MS */
3211*4882a593Smuzhiyun #ifndef CUSTOM_PNO_EVENT_LOCK_xTIME
3212*4882a593Smuzhiyun #define CUSTOM_PNO_EVENT_LOCK_xTIME	 DEFAULT_PNO_EVENT_LOCK_xTIME
3213*4882a593Smuzhiyun #endif
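/*
 * Worked example: assuming DHD_PACKET_TIMEOUT_MS is 500 ms (its usual value in
 * this driver), the default CUSTOM_PNO_EVENT_LOCK_xTIME of 2 keeps the wake
 * lock held for 2 * 500 ms = 1 s after a PNO event.
 */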
3214*4882a593Smuzhiyun /* hooks for custom dhd_dpc_prio setting option via Makefile */
3215*4882a593Smuzhiyun #define DEFAULT_DHP_DPC_PRIO  1
3216*4882a593Smuzhiyun #ifndef CUSTOM_DPC_PRIO_SETTING
3217*4882a593Smuzhiyun #define CUSTOM_DPC_PRIO_SETTING 	DEFAULT_DHP_DPC_PRIO
3218*4882a593Smuzhiyun #endif
3219*4882a593Smuzhiyun 
3220*4882a593Smuzhiyun #ifndef CUSTOM_LISTEN_INTERVAL
3221*4882a593Smuzhiyun #define CUSTOM_LISTEN_INTERVAL 		LISTEN_INTERVAL
3222*4882a593Smuzhiyun #endif /* CUSTOM_LISTEN_INTERVAL */
3223*4882a593Smuzhiyun 
3224*4882a593Smuzhiyun #define DEFAULT_SUSPEND_BCN_LI_DTIM		3
3225*4882a593Smuzhiyun #ifndef CUSTOM_SUSPEND_BCN_LI_DTIM
3226*4882a593Smuzhiyun #define CUSTOM_SUSPEND_BCN_LI_DTIM		DEFAULT_SUSPEND_BCN_LI_DTIM
3227*4882a593Smuzhiyun #endif
3228*4882a593Smuzhiyun 
3229*4882a593Smuzhiyun #ifdef OEM_ANDROID
3230*4882a593Smuzhiyun #ifndef BCN_TIMEOUT_IN_SUSPEND
3231*4882a593Smuzhiyun #define BCN_TIMEOUT_IN_SUSPEND			6 /* bcn timeout value in suspend mode */
3232*4882a593Smuzhiyun #endif
3233*4882a593Smuzhiyun #endif /* OEM_ANDROID */
3234*4882a593Smuzhiyun 
3235*4882a593Smuzhiyun #ifndef CUSTOM_RXF_PRIO_SETTING
3236*4882a593Smuzhiyun #define CUSTOM_RXF_PRIO_SETTING		MAX((CUSTOM_DPC_PRIO_SETTING - 1), 1)
3237*4882a593Smuzhiyun #endif
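/*
 * Worked example: with the default CUSTOM_DPC_PRIO_SETTING of 1 this resolves
 * to MAX((1 - 1), 1) = 1, so the rxf thread priority never drops below 1 even
 * when the dpc priority is lowered.
 */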
3238*4882a593Smuzhiyun 
3239*4882a593Smuzhiyun #define DEFAULT_WIFI_TURNOFF_DELAY		0
3240*4882a593Smuzhiyun #ifndef WIFI_TURNOFF_DELAY
3241*4882a593Smuzhiyun #define WIFI_TURNOFF_DELAY		DEFAULT_WIFI_TURNOFF_DELAY
3242*4882a593Smuzhiyun #endif /* WIFI_TURNOFF_DELAY */
3243*4882a593Smuzhiyun 
3244*4882a593Smuzhiyun #define DEFAULT_WIFI_TURNON_DELAY		200
3245*4882a593Smuzhiyun #ifndef WIFI_TURNON_DELAY
3246*4882a593Smuzhiyun #define WIFI_TURNON_DELAY		DEFAULT_WIFI_TURNON_DELAY
3247*4882a593Smuzhiyun #endif /* WIFI_TURNON_DELAY */
3248*4882a593Smuzhiyun 
3249*4882a593Smuzhiyun #ifdef BCMSDIO
3250*4882a593Smuzhiyun #define DEFAULT_DHD_WATCHDOG_INTERVAL_MS	10 /* msec */
3251*4882a593Smuzhiyun #else
3252*4882a593Smuzhiyun #define DEFAULT_DHD_WATCHDOG_INTERVAL_MS	0 /* msec */
3253*4882a593Smuzhiyun #endif
3254*4882a593Smuzhiyun #ifndef CUSTOM_DHD_WATCHDOG_MS
3255*4882a593Smuzhiyun #define CUSTOM_DHD_WATCHDOG_MS			DEFAULT_DHD_WATCHDOG_INTERVAL_MS
3256*4882a593Smuzhiyun #endif /* DEFAULT_DHD_WATCHDOG_INTERVAL_MS */
3257*4882a593Smuzhiyun 
3258*4882a593Smuzhiyun #define	DHD_INB_DW_DEASSERT_MS			250
3259*4882a593Smuzhiyun 
3260*4882a593Smuzhiyun #define DEFAULT_ASSOC_RETRY_MAX			3
3261*4882a593Smuzhiyun #ifndef CUSTOM_ASSOC_RETRY_MAX
3262*4882a593Smuzhiyun #define CUSTOM_ASSOC_RETRY_MAX			DEFAULT_ASSOC_RETRY_MAX
3263*4882a593Smuzhiyun #endif /* DEFAULT_ASSOC_RETRY_MAX */
3264*4882a593Smuzhiyun 
3265*4882a593Smuzhiyun #if defined(BCMSDIO) || defined(DISABLE_FRAMEBURST)
3266*4882a593Smuzhiyun #define DEFAULT_FRAMEBURST_SET			0
3267*4882a593Smuzhiyun #else
3268*4882a593Smuzhiyun #define DEFAULT_FRAMEBURST_SET			1
3269*4882a593Smuzhiyun #endif /* BCMSDIO || DISABLE_FRAMEBURST */
3270*4882a593Smuzhiyun 
3271*4882a593Smuzhiyun #ifndef CUSTOM_FRAMEBURST_SET
3272*4882a593Smuzhiyun #define CUSTOM_FRAMEBURST_SET			DEFAULT_FRAMEBURST_SET
3273*4882a593Smuzhiyun #endif /* CUSTOM_FRAMEBURST_SET */
3274*4882a593Smuzhiyun 
3275*4882a593Smuzhiyun #ifdef WLTDLS
3276*4882a593Smuzhiyun #ifndef CUSTOM_TDLS_IDLE_MODE_SETTING
3277*4882a593Smuzhiyun #define CUSTOM_TDLS_IDLE_MODE_SETTING  60000 /* 60 sec to tear down TDLS link if not active */
3278*4882a593Smuzhiyun #endif
3279*4882a593Smuzhiyun #ifndef CUSTOM_TDLS_RSSI_THRESHOLD_HIGH
3280*4882a593Smuzhiyun #define CUSTOM_TDLS_RSSI_THRESHOLD_HIGH -70 /* rssi threshold for establishing TDLS link */
3281*4882a593Smuzhiyun #endif
3282*4882a593Smuzhiyun #ifndef CUSTOM_TDLS_RSSI_THRESHOLD_LOW
3283*4882a593Smuzhiyun #define CUSTOM_TDLS_RSSI_THRESHOLD_LOW -80 /* rssi threshold for tearing down TDLS link */
3284*4882a593Smuzhiyun #endif
3285*4882a593Smuzhiyun #ifndef CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH
3286*4882a593Smuzhiyun #define CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH 100 /* pkt/sec threshold for establishing TDLS link */
3287*4882a593Smuzhiyun #endif
3288*4882a593Smuzhiyun #ifndef CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW
3289*4882a593Smuzhiyun #define CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW 10 /* pkt/sec threshold for tearing down TDLS link */
3290*4882a593Smuzhiyun #endif
3291*4882a593Smuzhiyun #endif /* WLTDLS */
3292*4882a593Smuzhiyun 
3293*4882a593Smuzhiyun #if defined(VSDB) || defined(ROAM_ENABLE)
3294*4882a593Smuzhiyun #define DEFAULT_BCN_TIMEOUT            6
3295*4882a593Smuzhiyun #else
3296*4882a593Smuzhiyun #define DEFAULT_BCN_TIMEOUT            4
3297*4882a593Smuzhiyun #endif /* VSDB || ROAM_ENABLE */
3298*4882a593Smuzhiyun 
3299*4882a593Smuzhiyun #ifndef CUSTOM_BCN_TIMEOUT
3300*4882a593Smuzhiyun #define CUSTOM_BCN_TIMEOUT             DEFAULT_BCN_TIMEOUT
3301*4882a593Smuzhiyun #endif
3302*4882a593Smuzhiyun 
3303*4882a593Smuzhiyun #define DEFAULT_BCN_TIMEOUT_IN_SUSPEND	10
3304*4882a593Smuzhiyun #ifndef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
3305*4882a593Smuzhiyun #define CUSTOM_BCN_TIMEOUT_IN_SUSPEND	DEFAULT_BCN_TIMEOUT_IN_SUSPEND
3306*4882a593Smuzhiyun #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
3307*4882a593Smuzhiyun 
3308*4882a593Smuzhiyun #define MAX_DTIM_SKIP_BEACON_INTERVAL	100 /* max allowed associated AP beacon interval for DTIM skip */
3309*4882a593Smuzhiyun #ifndef MAX_DTIM_ALLOWED_INTERVAL
3310*4882a593Smuzhiyun #define MAX_DTIM_ALLOWED_INTERVAL 600 /* max allowed total beacon interval for DTIM skip */
3311*4882a593Smuzhiyun #endif
3312*4882a593Smuzhiyun 
3313*4882a593Smuzhiyun #ifdef OEM_ANDROID
3314*4882a593Smuzhiyun #ifndef MIN_DTIM_FOR_ROAM_THRES_EXTEND
3315*4882a593Smuzhiyun #define MIN_DTIM_FOR_ROAM_THRES_EXTEND	600 /* minimum dtim interval to extend roam threshold */
3316*4882a593Smuzhiyun #endif
3317*4882a593Smuzhiyun #endif /* OEM_ANDROID */
3318*4882a593Smuzhiyun 
3319*4882a593Smuzhiyun #ifdef CONFIG_ROAM_RSSI_LIMIT
3320*4882a593Smuzhiyun extern int dhd_roam_rssi_limit_get(dhd_pub_t *dhd, int *lmt2g, int *lmt5g);
3321*4882a593Smuzhiyun extern int dhd_roam_rssi_limit_set(dhd_pub_t *dhd, int lmt2g, int lmt5g);
3322*4882a593Smuzhiyun #ifndef CUSTOM_ROAMRSSI_2G
3323*4882a593Smuzhiyun #define CUSTOM_ROAMRSSI_2G		ROAMRSSI_2G_DEFAULT
3324*4882a593Smuzhiyun #endif /* CUSTOM_ROAMRSSI_2G */
3325*4882a593Smuzhiyun #ifndef CUSTOM_ROAMRSSI_5G
3326*4882a593Smuzhiyun #define CUSTOM_ROAMRSSI_5G		ROAMRSSI_5G_DEFAULT
3327*4882a593Smuzhiyun #endif /* CUSTOM_ROAMRSSI_5G */
3328*4882a593Smuzhiyun #endif /* CONFIG_ROAM_RSSI_LIMIT */
3329*4882a593Smuzhiyun #ifdef CONFIG_ROAM_MIN_DELTA
3330*4882a593Smuzhiyun extern int dhd_roam_min_delta_get(dhd_pub_t *dhd, uint32 *dt2g, uint32 *dt5g);
3331*4882a593Smuzhiyun extern int dhd_roam_min_delta_set(dhd_pub_t *dhd, uint32 dt2g, uint32 dt5g);
3332*4882a593Smuzhiyun #ifndef CUSTOM_ROAM_MIN_DELTA
3333*4882a593Smuzhiyun #define CUSTOM_ROAM_MIN_DELTA		ROAM_MIN_DELTA_DEFAULT
3334*4882a593Smuzhiyun #endif /* CUSTOM_ROAM_MIN_DELTA */
3335*4882a593Smuzhiyun #endif /* CONFIG_ROAM_MIN_DELTA */
3336*4882a593Smuzhiyun 
3337*4882a593Smuzhiyun #define NO_DTIM_SKIP 1
3338*4882a593Smuzhiyun #ifdef SDTEST
3339*4882a593Smuzhiyun /* Echo packet generator (SDIO), pkts/s */
3340*4882a593Smuzhiyun extern uint dhd_pktgen;
3341*4882a593Smuzhiyun 
3342*4882a593Smuzhiyun /* Echo packet len (0 => sawtooth, max 1800) */
3343*4882a593Smuzhiyun extern uint dhd_pktgen_len;
3344*4882a593Smuzhiyun #define MAX_PKTGEN_LEN 1800
3345*4882a593Smuzhiyun #endif
3346*4882a593Smuzhiyun 
3347*4882a593Smuzhiyun #ifdef BCMSLTGT
3348*4882a593Smuzhiyun /* Account for slow hardware (QT) */
3349*4882a593Smuzhiyun extern uint htclkratio;
3350*4882a593Smuzhiyun extern int dngl_xtalfreq;
3351*4882a593Smuzhiyun #endif
3352*4882a593Smuzhiyun 
3353*4882a593Smuzhiyun /* optionally set by a module_param_string() */
3354*4882a593Smuzhiyun #define MOD_PARAM_PATHLEN	2048
3355*4882a593Smuzhiyun #define MOD_PARAM_INFOLEN	512
3356*4882a593Smuzhiyun #define MOD_PARAM_SRLEN		64
3357*4882a593Smuzhiyun 
3358*4882a593Smuzhiyun #ifdef SOFTAP
3359*4882a593Smuzhiyun extern char fw_path2[MOD_PARAM_PATHLEN];
3360*4882a593Smuzhiyun #endif
3361*4882a593Smuzhiyun 
3362*4882a593Smuzhiyun #if defined(CUSTOMER_HW4)
3363*4882a593Smuzhiyun #define VENDOR_PATH "/vendor"
3364*4882a593Smuzhiyun #else
3365*4882a593Smuzhiyun #define VENDOR_PATH ""
3366*4882a593Smuzhiyun #endif /* CUSTOMER_HW4 */
3367*4882a593Smuzhiyun 
3368*4882a593Smuzhiyun /* Platform path name -
3369*4882a593Smuzhiyun  * Used to locate the FW debug support files.
3370*4882a593Smuzhiyun  * 1) If the platform Makefile specifies where they should be
3371*4882a593Smuzhiyun  *    picked up from, use that.
3372*4882a593Smuzhiyun  * 2) If the platform Makefile does not specify anything, use the
3373*4882a593Smuzhiyun  *    scheme mapped below.
3374*4882a593Smuzhiyun  */
3375*4882a593Smuzhiyun #if !defined(PLATFORM_PATH)
3376*4882a593Smuzhiyun /* First Overrides */
3377*4882a593Smuzhiyun #if defined(DHD_LEGACY_FILE_PATH)
3378*4882a593Smuzhiyun /* If Legacy file path is to be used */
3379*4882a593Smuzhiyun #define PLATFORM_PATH	"/data/"
3380*4882a593Smuzhiyun #elif defined(PLATFORM_SLP)
3381*4882a593Smuzhiyun /* Path Name for SLP */
3382*4882a593Smuzhiyun #define PLATFORM_PATH	"/opt/etc/"
3383*4882a593Smuzhiyun #else
3384*4882a593Smuzhiyun /* End of Overrides, rely on what is dictated by Android */
3385*4882a593Smuzhiyun #if defined(CUSTOMER_HW4)
3386*4882a593Smuzhiyun #define PLATFORM_PATH	"/data/vendor/conn/"
3387*4882a593Smuzhiyun #else
3388*4882a593Smuzhiyun #define PLATFORM_PATH	"/data/misc/conn/"
3389*4882a593Smuzhiyun #endif /* CUSTOMER_HW4  */
3390*4882a593Smuzhiyun #define DHD_MAC_ADDR_EXPORT
3391*4882a593Smuzhiyun #define DHD_ADPS_BAM_EXPORT
3392*4882a593Smuzhiyun #define DHD_EXPORT_CNTL_FILE
3393*4882a593Smuzhiyun #define DHD_SOFTAP_DUAL_IF_INFO
3394*4882a593Smuzhiyun #define DHD_SEND_HANG_PRIVCMD_ERRORS
3395*4882a593Smuzhiyun #endif /* DHD_LEGACY_FILE_PATH */
3396*4882a593Smuzhiyun #endif /* !PLATFORM_PATH */
3397*4882a593Smuzhiyun 
3398*4882a593Smuzhiyun #ifdef DHD_MAC_ADDR_EXPORT
3399*4882a593Smuzhiyun extern struct ether_addr sysfs_mac_addr;
3400*4882a593Smuzhiyun #endif /* DHD_MAC_ADDR_EXPORT */
3401*4882a593Smuzhiyun 
3402*4882a593Smuzhiyun #if defined(LINUX) || defined(linux)
3403*4882a593Smuzhiyun /* Flag to indicate if we should download firmware on driver load */
3404*4882a593Smuzhiyun extern uint dhd_download_fw_on_driverload;
3405*4882a593Smuzhiyun #ifndef BCMDBUS
3406*4882a593Smuzhiyun extern int allow_delay_fwdl;
3407*4882a593Smuzhiyun #endif /* !BCMDBUS */
3408*4882a593Smuzhiyun 
3409*4882a593Smuzhiyun extern int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost);
3410*4882a593Smuzhiyun extern int dhd_write_file(const char *filepath, char *buf, int buf_len);
3411*4882a593Smuzhiyun extern int dhd_read_file(const char *filepath, char *buf, int buf_len);
3412*4882a593Smuzhiyun extern int dhd_write_file_and_check(const char *filepath, char *buf, int buf_len);
3413*4882a593Smuzhiyun extern int dhd_file_delete(char *path);
3414*4882a593Smuzhiyun 
3415*4882a593Smuzhiyun #ifdef READ_MACADDR
3416*4882a593Smuzhiyun extern int dhd_set_macaddr_from_file(dhd_pub_t *dhdp);
3417*4882a593Smuzhiyun #else
3418*4882a593Smuzhiyun static INLINE int dhd_set_macaddr_from_file(dhd_pub_t *dhdp) { return 0; }
3419*4882a593Smuzhiyun #endif /* READ_MACADDR */
3420*4882a593Smuzhiyun #ifdef WRITE_MACADDR
3421*4882a593Smuzhiyun extern int dhd_write_macaddr(struct ether_addr *mac);
3422*4882a593Smuzhiyun #else
3423*4882a593Smuzhiyun static INLINE int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
3424*4882a593Smuzhiyun #endif /* WRITE_MACADDR */
3425*4882a593Smuzhiyun 
3426*4882a593Smuzhiyun #if defined(USE_CID_CHECK) || defined(USE_DIRECT_VID_TAG)
3427*4882a593Smuzhiyun #if defined(BCM4361_CHIP) || defined(BCM4375_CHIP) || defined(BCM4389_CHIP_DEF)
3428*4882a593Smuzhiyun #define DHD_USE_CISINFO_FROM_OTP
3429*4882a593Smuzhiyun /* For COB, CID/MAC cannot be checked in OTP, so define it here */
3430*4882a593Smuzhiyun #define DHD_READ_CIS_FROM_BP
3431*4882a593Smuzhiyun #endif /* CONFIG_BCM4361 || CONFIG_BCM4375 || CONFIG_BCM4389_DEF */
3432*4882a593Smuzhiyun #define MAX_VNAME_LEN		64
3433*4882a593Smuzhiyun #define MAX_VID_LEN		8
3434*4882a593Smuzhiyun #define MODULE_NAME_INDEX_MAX	3
3435*4882a593Smuzhiyun #define MAX_EXTENSION 20
3436*4882a593Smuzhiyun typedef struct {
3437*4882a593Smuzhiyun 	char cid_ext[MAX_EXTENSION];
3438*4882a593Smuzhiyun 	char nvram_ext[MAX_EXTENSION];
3439*4882a593Smuzhiyun 	char fw_ext[MAX_EXTENSION];
3440*4882a593Smuzhiyun } naming_info_t;
3441*4882a593Smuzhiyun #ifdef DHD_EXPORT_CNTL_FILE
3442*4882a593Smuzhiyun extern char cidinfostr[MAX_VNAME_LEN];
3443*4882a593Smuzhiyun #endif /* DHD_EXPORT_CNTL_FILE */
3444*4882a593Smuzhiyun extern int dhd_check_module_cid(dhd_pub_t *dhdp);
3445*4882a593Smuzhiyun #else
3446*4882a593Smuzhiyun static INLINE int dhd_check_module_cid(dhd_pub_t *dhdp) { return 0; }
3447*4882a593Smuzhiyun #endif /* USE_CID_CHECK || USE_DIRECT_VID_TAG */
3448*4882a593Smuzhiyun #ifdef USE_CID_CHECK
3449*4882a593Smuzhiyun extern char *dhd_get_cid_info(unsigned char *vid, int vid_length);
3450*4882a593Smuzhiyun #endif /* USE_CID_CHECK */
3451*4882a593Smuzhiyun #ifdef GET_MAC_FROM_OTP
3452*4882a593Smuzhiyun extern int dhd_check_module_mac(dhd_pub_t *dhdp);
3453*4882a593Smuzhiyun #else
3454*4882a593Smuzhiyun static INLINE int dhd_check_module_mac(dhd_pub_t *dhdp) { return 0; }
3455*4882a593Smuzhiyun #endif /* GET_MAC_FROM_OTP */
3456*4882a593Smuzhiyun 
3457*4882a593Smuzhiyun #if defined(READ_MACADDR) || defined(WRITE_MACADDR) || defined(USE_CID_CHECK) || \
3458*4882a593Smuzhiyun 	defined(GET_MAC_FROM_OTP)
3459*4882a593Smuzhiyun #define DHD_USE_CISINFO
3460*4882a593Smuzhiyun #endif /* READ_MACADDR || WRITE_MACADDR || USE_CID_CHECK || GET_MAC_FROM_OTP */
3461*4882a593Smuzhiyun 
3462*4882a593Smuzhiyun #ifdef DHD_USE_CISINFO
3463*4882a593Smuzhiyun int dhd_read_cis(dhd_pub_t *dhdp);
3464*4882a593Smuzhiyun int dhd_read_otp_sw_rgn(dhd_pub_t *dhdp);
3465*4882a593Smuzhiyun void dhd_clear_cis(dhd_pub_t *dhdp);
3466*4882a593Smuzhiyun int dhd_alloc_cis(dhd_pub_t *dhdp);
3467*4882a593Smuzhiyun #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK)
3468*4882a593Smuzhiyun extern int dhd_check_module_b85a(void);
3469*4882a593Smuzhiyun extern int dhd_check_module_b90(void);
3470*4882a593Smuzhiyun #define BCM4359_MODULE_TYPE_B90B 1
3471*4882a593Smuzhiyun #define BCM4359_MODULE_TYPE_B90S 2
3472*4882a593Smuzhiyun #endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */
3473*4882a593Smuzhiyun #if defined(USE_CID_CHECK)
3474*4882a593Smuzhiyun extern int dhd_check_module_bcm(char *module_type, int index, bool *is_murata_fem);
3475*4882a593Smuzhiyun extern naming_info_t *
3476*4882a593Smuzhiyun dhd_find_naming_info(dhd_pub_t *dhdp, char *module_type);
3477*4882a593Smuzhiyun extern naming_info_t * dhd_find_naming_info_by_chip_rev(dhd_pub_t *dhdp, bool *is_murata_fem);
3478*4882a593Smuzhiyun #endif /* defined(USE_CID_CHECK) */
3479*4882a593Smuzhiyun #ifdef USE_DIRECT_VID_TAG
3480*4882a593Smuzhiyun #define VENDOR_OFF 1
3481*4882a593Smuzhiyun #define MD_REV_OFF 0
3482*4882a593Smuzhiyun #define A0_REV "_a0"
3483*4882a593Smuzhiyun #define B0_REV "_b0"
3484*4882a593Smuzhiyun extern int dhd_check_stored_module_info(char *vid);
3485*4882a593Smuzhiyun extern int concate_nvram_by_vid(dhd_pub_t *dhdp, char *nv_path, char *chipstr);
3486*4882a593Smuzhiyun #endif /* USE_DIRECT_VID_TAG */
3487*4882a593Smuzhiyun #if defined(USE_CID_CHECK) && defined(USE_DIRECT_VID_TAG)
3488*4882a593Smuzhiyun #error Please use USE_CID_CHECK/USE_DIRECT_VID_TAG exclusively
3489*4882a593Smuzhiyun #endif /* USE_CID_CHECK && USE_DIRECT_VID_TAG */
3490*4882a593Smuzhiyun #else
3491*4882a593Smuzhiyun static INLINE int dhd_read_cis(dhd_pub_t *dhdp) { return 0; }
3492*4882a593Smuzhiyun static INLINE int dhd_read_otp_sw_rgn(dhd_pub_t *dhdp) { return 0; }
3493*4882a593Smuzhiyun static INLINE void dhd_clear_cis(dhd_pub_t *dhdp) { }
3494*4882a593Smuzhiyun static INLINE int dhd_alloc_cis(dhd_pub_t *dhdp) { return 0; }
3495*4882a593Smuzhiyun #endif /* DHD_USE_CISINFO */
3496*4882a593Smuzhiyun 
3497*4882a593Smuzhiyun #else /* LINUX || linux */
3498*4882a593Smuzhiyun static INLINE int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost) { return 0; }
3499*4882a593Smuzhiyun #endif /* LINUX || linux */
3500*4882a593Smuzhiyun 
3501*4882a593Smuzhiyun #if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
3502*4882a593Smuzhiyun /* Flag to indicate whether we distinguish the power-off policy when
3503*4882a593Smuzhiyun  * the user sets the menu "Keep Wi-Fi on during sleep" to "Never"
3504*4882a593Smuzhiyun  */
3505*4882a593Smuzhiyun extern int trigger_deep_sleep;
3506*4882a593Smuzhiyun int dhd_deepsleep(struct net_device *dev, int flag);
3507*4882a593Smuzhiyun #endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
3508*4882a593Smuzhiyun 
3509*4882a593Smuzhiyun extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar);
3510*4882a593Smuzhiyun extern void dhd_wait_event_wakeup(dhd_pub_t*dhd);
3511*4882a593Smuzhiyun 
3512*4882a593Smuzhiyun #define IFLOCK_INIT(lock)       *lock = 0
3513*4882a593Smuzhiyun #define IFLOCK(lock)    while (InterlockedCompareExchange((lock), 1, 0))	\
3514*4882a593Smuzhiyun 	NdisStallExecution(1);
3515*4882a593Smuzhiyun #define IFUNLOCK(lock)  InterlockedExchange((lock), 0)
3516*4882a593Smuzhiyun #define IFLOCK_FREE(lock)
3517*4882a593Smuzhiyun #define FW_SUPPORTED(dhd, capa) ((strstr(dhd->fw_capabilities, " " #capa " ") != NULL))
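/*
 * Illustrative sketch: fw_capabilities is a space-delimited capability string
 * read from the dongle, so FW_SUPPORTED() is a whole-word substring match
 * (note the surrounding spaces in the pattern). The capability name below is
 * an example only:
 *
 *	if (FW_SUPPORTED(dhdp, p2p)) {
 *		// take the p2p-capable path
 *	}
 */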
3518*4882a593Smuzhiyun #ifdef ARP_OFFLOAD_SUPPORT
3519*4882a593Smuzhiyun #define MAX_IPV4_ENTRIES	8
3520*4882a593Smuzhiyun void dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode);
3521*4882a593Smuzhiyun void dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable);
3522*4882a593Smuzhiyun 
3523*4882a593Smuzhiyun /* dhd_common ARP offload wrappers */
3524*4882a593Smuzhiyun void dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx);
3525*4882a593Smuzhiyun void dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx);
3526*4882a593Smuzhiyun int dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx);
3527*4882a593Smuzhiyun void dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx);
3528*4882a593Smuzhiyun #endif /* ARP_OFFLOAD_SUPPORT */
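/*
 * Illustrative sketch of the usual ARP offload bring-up order ('ipaddr' and
 * 'idx' are placeholders for the host IPv4 address and interface index):
 *
 *	dhd_arp_offload_set(dhdp, dhd_arp_mode);	// program agent mode
 *	dhd_arp_offload_enable(dhdp, TRUE);		// turn the agent on
 *	dhd_arp_offload_add_ip(dhdp, ipaddr, idx);	// register host IPv4 addr
 *	...
 *	dhd_aoe_hostip_clr(dhdp, idx);			// clear on address removal
 */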
3529*4882a593Smuzhiyun #ifdef WLTDLS
3530*4882a593Smuzhiyun int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac);
3531*4882a593Smuzhiyun int dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode);
3532*4882a593Smuzhiyun #ifdef PCIE_FULL_DONGLE
3533*4882a593Smuzhiyun int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event);
3534*4882a593Smuzhiyun int dhd_tdls_event_handler(dhd_pub_t *dhd_pub, wl_event_msg_t *event);
3535*4882a593Smuzhiyun int dhd_free_tdls_peer_list(dhd_pub_t *dhd_pub);
3536*4882a593Smuzhiyun #endif /* PCIE_FULL_DONGLE */
3537*4882a593Smuzhiyun #endif /* WLTDLS */
3538*4882a593Smuzhiyun 
3539*4882a593Smuzhiyun /* Neighbor Discovery Offload Support */
3540*4882a593Smuzhiyun extern int dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable);
3541*4882a593Smuzhiyun int dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipaddr, int idx);
3542*4882a593Smuzhiyun int dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx);
3543*4882a593Smuzhiyun 
3544*4882a593Smuzhiyun /* Enhanced ND offload support */
3545*4882a593Smuzhiyun uint16 dhd_ndo_get_version(dhd_pub_t *dhdp);
3546*4882a593Smuzhiyun int dhd_ndo_add_ip_with_type(dhd_pub_t *dhdp, char *ipv6addr, uint8 type, int idx);
3547*4882a593Smuzhiyun int dhd_ndo_remove_ip_by_addr(dhd_pub_t *dhdp, char *ipv6addr, int idx);
3548*4882a593Smuzhiyun int dhd_ndo_remove_ip_by_type(dhd_pub_t *dhdp, uint8 type, int idx);
3549*4882a593Smuzhiyun int dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t *dhdp, int enable);
3550*4882a593Smuzhiyun 
3551*4882a593Smuzhiyun /* ioctl processing for nl80211 */
3552*4882a593Smuzhiyun int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, struct dhd_ioctl *ioc, void *data_buf);
3553*4882a593Smuzhiyun 
3554*4882a593Smuzhiyun #if defined(SUPPORT_MULTIPLE_REVISION)
3555*4882a593Smuzhiyun extern int
3556*4882a593Smuzhiyun concate_revision(struct dhd_bus *bus, char *fwpath, char *nvpath);
3557*4882a593Smuzhiyun #endif /* SUPPORT_MULTIPLE_REVISION */
3558*4882a593Smuzhiyun void dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path,
3559*4882a593Smuzhiyun 											char *pclm_path, char *pconf_path);
3560*4882a593Smuzhiyun void dhd_set_bus_state(void *bus, uint32 state);
3561*4882a593Smuzhiyun 
3562*4882a593Smuzhiyun /* Remove proper pkts (either one non-fragmented pkt or all fragments of a pkt) */
3563*4882a593Smuzhiyun typedef int (*f_droppkt_t)(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ);
3564*4882a593Smuzhiyun extern bool dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn);
3565*4882a593Smuzhiyun 
3566*4882a593Smuzhiyun #ifdef PROP_TXSTATUS
3567*4882a593Smuzhiyun int dhd_os_wlfc_block(dhd_pub_t *pub);
3568*4882a593Smuzhiyun int dhd_os_wlfc_unblock(dhd_pub_t *pub);
3569*4882a593Smuzhiyun extern const uint8 prio2fifo[];
3570*4882a593Smuzhiyun #endif /* PROP_TXSTATUS */
3571*4882a593Smuzhiyun 
3572*4882a593Smuzhiyun int dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size);
3573*4882a593Smuzhiyun int dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size);
3574*4882a593Smuzhiyun int dhd_common_socram_dump(dhd_pub_t *dhdp);
3575*4882a593Smuzhiyun 
3576*4882a593Smuzhiyun int dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen);
3577*4882a593Smuzhiyun 
3578*4882a593Smuzhiyun int dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size);
3579*4882a593Smuzhiyun void dhd_get_memdump_filename(struct net_device *ndev, char *memdump_path, int len, char *fname);
3580*4882a593Smuzhiyun uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail);
3581*4882a593Smuzhiyun void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size);
3582*4882a593Smuzhiyun 
3583*4882a593Smuzhiyun #if defined(CONFIG_DHD_USE_STATIC_BUF)
3584*4882a593Smuzhiyun #define DHD_OS_PREALLOC(dhdpub, section, size) dhd_os_prealloc(dhdpub, section, size, FALSE)
3585*4882a593Smuzhiyun #define DHD_OS_PREFREE(dhdpub, addr, size) dhd_os_prefree(dhdpub, addr, size)
3586*4882a593Smuzhiyun #else
3587*4882a593Smuzhiyun #define DHD_OS_PREALLOC(dhdpub, section, size) MALLOC(dhdpub->osh, size)
3588*4882a593Smuzhiyun #define DHD_OS_PREFREE(dhdpub, addr, size) MFREE(dhdpub->osh, addr, size)
3589*4882a593Smuzhiyun #endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */
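/*
 * Illustrative sketch: callers use the same macros whether or not the platform
 * provides preallocated (static) buffers; 'section' stands for one of the
 * preallocation section ids used by the OS port and is shown here generically:
 *
 *	buf = DHD_OS_PREALLOC(dhdp, section, size);
 *	if (buf == NULL)
 *		return BCME_NOMEM;
 *	...
 *	DHD_OS_PREFREE(dhdp, buf, size);
 */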
3590*4882a593Smuzhiyun 
3591*4882a593Smuzhiyun #ifdef USE_WFA_CERT_CONF
3592*4882a593Smuzhiyun enum {
3593*4882a593Smuzhiyun 	SET_PARAM_BUS_TXGLOM_MODE,
3594*4882a593Smuzhiyun 	SET_PARAM_ROAMOFF,
3595*4882a593Smuzhiyun #ifdef USE_WL_FRAMEBURST
3596*4882a593Smuzhiyun 	SET_PARAM_FRAMEBURST,
3597*4882a593Smuzhiyun #endif /* USE_WL_FRAMEBURST */
3598*4882a593Smuzhiyun #ifdef USE_WL_TXBF
3599*4882a593Smuzhiyun 	SET_PARAM_TXBF,
3600*4882a593Smuzhiyun #endif /* USE_WL_TXBF */
3601*4882a593Smuzhiyun #ifdef PROP_TXSTATUS
3602*4882a593Smuzhiyun 	SET_PARAM_PROPTX,
3603*4882a593Smuzhiyun 	SET_PARAM_PROPTXMODE,
3604*4882a593Smuzhiyun #endif /* PROP_TXSTATUS */
3605*4882a593Smuzhiyun 	PARAM_LAST_VALUE
3606*4882a593Smuzhiyun };
3607*4882a593Smuzhiyun extern int sec_get_param_wfa_cert(dhd_pub_t *dhd, int mode, uint* read_val);
3608*4882a593Smuzhiyun #ifdef DHD_EXPORT_CNTL_FILE
3609*4882a593Smuzhiyun #define VALUENOTSET 0xFFFFFFFFu
3610*4882a593Smuzhiyun extern uint32 bus_txglom;
3611*4882a593Smuzhiyun extern uint32 roam_off;
3612*4882a593Smuzhiyun #ifdef USE_WL_FRAMEBURST
3613*4882a593Smuzhiyun extern uint32 frameburst;
3614*4882a593Smuzhiyun #endif /* USE_WL_FRAMEBURST */
3615*4882a593Smuzhiyun #ifdef USE_WL_TXBF
3616*4882a593Smuzhiyun extern uint32 txbf;
3617*4882a593Smuzhiyun #endif /* USE_WL_TXBF */
3618*4882a593Smuzhiyun #ifdef PROP_TXSTATUS
3619*4882a593Smuzhiyun extern uint32 proptx;
3620*4882a593Smuzhiyun #endif /* PROP_TXSTATUS */
3621*4882a593Smuzhiyun #endif /* DHD_EXPORT_CNTL_FILE */
3622*4882a593Smuzhiyun #endif /* USE_WFA_CERT_CONF */
3623*4882a593Smuzhiyun 
3624*4882a593Smuzhiyun #if defined(BCM_ROUTER_DHD)
3625*4882a593Smuzhiyun #if defined(HNDCTF)
3626*4882a593Smuzhiyun bool dhd_ctf_hotbrc_check(dhd_pub_t *dhdp, uint8 *eh, int ifidx);
3627*4882a593Smuzhiyun void dhd_ctf_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
3628*4882a593Smuzhiyun bool dhd_l2_filter_chainable(dhd_pub_t *dhdp, uint8 *eh, int ifidx);
3629*4882a593Smuzhiyun bool dhd_wet_chainable(dhd_pub_t *dhdap);
3630*4882a593Smuzhiyun bool dhd_rx_pkt_chainable(dhd_pub_t *dhdp, int ifidx);
3631*4882a593Smuzhiyun #endif /* HNDCTF */
3632*4882a593Smuzhiyun extern void dhd_schedule_trap_log_dump(dhd_pub_t *dhdp,
3633*4882a593Smuzhiyun 	uint8 *buf, uint32 size);
3634*4882a593Smuzhiyun /* When a new flowid is allocated/deallocated, inform dhd. */
3635*4882a593Smuzhiyun extern void dhd_add_flowid(dhd_pub_t * dhdp, int ifidx,
3636*4882a593Smuzhiyun                           uint8 ac_prio, void * ea, uint16 flowid);
3637*4882a593Smuzhiyun extern void dhd_del_flowid(dhd_pub_t * dhdp, int ifidx, uint16 flowid);
3638*4882a593Smuzhiyun #else  /* ! BCM_ROUTER_DHD */
3639*4882a593Smuzhiyun #define dhd_add_flowid(pub, ifidx, ac_prio, ea, flowid)  do {} while (0)
3640*4882a593Smuzhiyun #define dhd_del_flowid(pub, ifidx, flowid)               do {} while (0)
3641*4882a593Smuzhiyun bool dhd_wet_chainable(dhd_pub_t *dhdp);
3642*4882a593Smuzhiyun #endif /* ! BCM_ROUTER_DHD */
3643*4882a593Smuzhiyun 
3644*4882a593Smuzhiyun extern unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub);
3645*4882a593Smuzhiyun extern void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags);
3646*4882a593Smuzhiyun 
3647*4882a593Smuzhiyun /** Miscellaneous DHD Spin Locks */
3648*4882a593Smuzhiyun 
3649*4882a593Smuzhiyun /* Enable DHD general spin lock/unlock */
3650*4882a593Smuzhiyun #define DHD_GENERAL_LOCK(dhdp, flags) \
3651*4882a593Smuzhiyun 	(flags) = dhd_os_general_spin_lock(dhdp)
3652*4882a593Smuzhiyun #define DHD_GENERAL_UNLOCK(dhdp, flags) \
3653*4882a593Smuzhiyun 	dhd_os_general_spin_unlock((dhdp), (flags))
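/*
 * Illustrative sketch: all DHD_*_LOCK/UNLOCK pairs below follow the same
 * save/restore pattern as the general lock:
 *
 *	unsigned long flags;
 *	DHD_GENERAL_LOCK(dhdp, flags);
 *	// ... touch state shared with other execution contexts ...
 *	DHD_GENERAL_UNLOCK(dhdp, flags);
 */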
3654*4882a593Smuzhiyun 
3655*4882a593Smuzhiyun /* Enable DHD timer spin lock/unlock */
3656*4882a593Smuzhiyun #define DHD_TIMER_LOCK(lock, flags)     (flags) = osl_spin_lock(lock)
3657*4882a593Smuzhiyun #define DHD_TIMER_UNLOCK(lock, flags)   osl_spin_unlock(lock, (flags))
3658*4882a593Smuzhiyun 
3659*4882a593Smuzhiyun /* Enable DHD flowring spin lock/unlock */
3660*4882a593Smuzhiyun #define DHD_FLOWRING_LOCK(lock, flags)     (flags) = osl_spin_lock(lock)
3661*4882a593Smuzhiyun #define DHD_FLOWRING_UNLOCK(lock, flags)   osl_spin_unlock((lock), (flags))
3662*4882a593Smuzhiyun 
3663*4882a593Smuzhiyun /* Enable DHD common flowring info spin lock/unlock */
3664*4882a593Smuzhiyun #define DHD_FLOWID_LOCK(lock, flags)       (flags) = osl_spin_lock(lock)
3665*4882a593Smuzhiyun #define DHD_FLOWID_UNLOCK(lock, flags)     osl_spin_unlock((lock), (flags))
3666*4882a593Smuzhiyun 
3667*4882a593Smuzhiyun /* Enable DHD common flowring list spin lock/unlock */
3668*4882a593Smuzhiyun #define DHD_FLOWRING_LIST_LOCK(lock, flags)       (flags) = osl_spin_lock(lock)
3669*4882a593Smuzhiyun #define DHD_FLOWRING_LIST_UNLOCK(lock, flags)     osl_spin_unlock((lock), (flags))
3670*4882a593Smuzhiyun 
3671*4882a593Smuzhiyun #define DHD_RING_LOCK(lock, flags)	(flags) = osl_spin_lock(lock)
3672*4882a593Smuzhiyun #define DHD_RING_UNLOCK(lock, flags)	osl_spin_unlock((lock), (flags))
3673*4882a593Smuzhiyun 
3674*4882a593Smuzhiyun #define DHD_BUS_LP_STATE_LOCK(lock, flags)	(flags) = osl_spin_lock(lock)
3675*4882a593Smuzhiyun #define DHD_BUS_LP_STATE_UNLOCK(lock, flags)	osl_spin_unlock((lock), (flags))
3676*4882a593Smuzhiyun 
3677*4882a593Smuzhiyun #define DHD_BAR1_SWITCH_LOCK(lock, flags)	(flags) = osl_spin_lock(lock)
3678*4882a593Smuzhiyun #define DHD_BAR1_SWITCH_UNLOCK(lock, flags)	osl_spin_unlock((lock), (flags))
3679*4882a593Smuzhiyun 
3680*4882a593Smuzhiyun #define DHD_BUS_PWR_REQ_LOCK(lock, flags)	(flags) = osl_spin_lock(lock)
3681*4882a593Smuzhiyun #define DHD_BUS_PWR_REQ_UNLOCK(lock, flags)	osl_spin_unlock((lock), (flags))
3682*4882a593Smuzhiyun 
3683*4882a593Smuzhiyun #ifdef PCIE_INB_DW
3684*4882a593Smuzhiyun #define DHD_BUS_DONGLE_DS_LOCK(lock, flags)	(flags) = osl_spin_lock(lock)
3685*4882a593Smuzhiyun #define DHD_BUS_DONGLE_DS_UNLOCK(lock, flags)	osl_spin_unlock((lock), (flags))
3686*4882a593Smuzhiyun #endif /* PCIE_INB_DW */
3687*4882a593Smuzhiyun 
3688*4882a593Smuzhiyun /* Enable DHD backplane spin lock/unlock */
3689*4882a593Smuzhiyun #define DHD_BACKPLANE_ACCESS_LOCK(lock, flags)     (flags) = osl_spin_lock(lock)
3690*4882a593Smuzhiyun #define DHD_BACKPLANE_ACCESS_UNLOCK(lock, flags)   osl_spin_unlock((lock), (flags))
3691*4882a593Smuzhiyun 
3692*4882a593Smuzhiyun #define DHD_BUS_INB_DW_LOCK(lock, flags)	(flags) = osl_spin_lock(lock)
3693*4882a593Smuzhiyun #define DHD_BUS_INB_DW_UNLOCK(lock, flags)	osl_spin_unlock((lock), (flags))
3694*4882a593Smuzhiyun 
3695*4882a593Smuzhiyun /* Enable DHD TDLS peer list spin lock/unlock */
3696*4882a593Smuzhiyun #ifdef WLTDLS
3697*4882a593Smuzhiyun #define DHD_TDLS_LOCK(lock, flags)       (flags) = osl_spin_lock(lock)
3698*4882a593Smuzhiyun #define DHD_TDLS_UNLOCK(lock, flags)     osl_spin_unlock((lock), (flags))
3699*4882a593Smuzhiyun #endif /* WLTDLS */
3700*4882a593Smuzhiyun 
3704*4882a593Smuzhiyun #ifdef DBG_PKT_MON
3705*4882a593Smuzhiyun /* Enable DHD PKT MON spin lock/unlock */
3706*4882a593Smuzhiyun #define DHD_PKT_MON_LOCK(lock, flags)     (flags) = osl_spin_lock(lock)
3707*4882a593Smuzhiyun #define DHD_PKT_MON_UNLOCK(lock, flags)   osl_spin_unlock(lock, (flags))
3708*4882a593Smuzhiyun #endif /* DBG_PKT_MON */
3709*4882a593Smuzhiyun 
3710*4882a593Smuzhiyun #ifdef DHD_PKT_LOGGING
3711*4882a593Smuzhiyun /* Enable DHD PKT LOG spin lock/unlock */
3712*4882a593Smuzhiyun #define DHD_PKT_LOG_LOCK(lock, flags)     (flags) = osl_spin_lock(lock)
3713*4882a593Smuzhiyun #define DHD_PKT_LOG_UNLOCK(lock, flags)   osl_spin_unlock(lock, (flags))
3714*4882a593Smuzhiyun #endif /* DHD_PKT_LOGGING */
3715*4882a593Smuzhiyun 
3716*4882a593Smuzhiyun #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
3717*4882a593Smuzhiyun #define DHD_AWDL_STATS_LOCK(lock, flags)	(flags) = osl_spin_lock(lock)
3718*4882a593Smuzhiyun #define DHD_AWDL_STATS_UNLOCK(lock, flags)	osl_spin_unlock(lock, (flags))
3719*4882a593Smuzhiyun #endif /* DHD_AWDL && AWDL_SLOT_STATS */
3720*4882a593Smuzhiyun 
3721*4882a593Smuzhiyun #if defined(linux) || defined(LINUX)
3722*4882a593Smuzhiyun #define DHD_LINUX_GENERAL_LOCK(dhdp, flags)	DHD_GENERAL_LOCK(dhdp, flags)
3723*4882a593Smuzhiyun #define DHD_LINUX_GENERAL_UNLOCK(dhdp, flags)	DHD_GENERAL_UNLOCK(dhdp, flags)
3724*4882a593Smuzhiyun #else
3725*4882a593Smuzhiyun #define DHD_LINUX_GENERAL_LOCK(dhdp, flags)	do {BCM_REFERENCE(flags);} while (0)
3726*4882a593Smuzhiyun #define DHD_LINUX_GENERAL_UNLOCK(dhdp, flags)	do {BCM_REFERENCE(flags);} while (0)
3727*4882a593Smuzhiyun #endif
3728*4882a593Smuzhiyun 
3732*4882a593Smuzhiyun #define DHD_RX_NAPI_QUEUE_LOCK(lock, flags)	(flags) = osl_spin_lock(lock)
3733*4882a593Smuzhiyun #define DHD_RX_NAPI_QUEUE_UNLOCK(lock, flags)	osl_spin_unlock((lock), (flags))
3734*4882a593Smuzhiyun 
3735*4882a593Smuzhiyun #define DHD_UP_LOCK(lock, flags)	(flags) = osl_spin_lock(lock)
3736*4882a593Smuzhiyun #define DHD_UP_UNLOCK(lock, flags)	osl_spin_unlock((lock), (flags))
3737*4882a593Smuzhiyun 
3738*4882a593Smuzhiyun #define DHD_WAKE_SPIN_LOCK(lock, flags)	(flags) = osl_spin_lock(lock)
3739*4882a593Smuzhiyun #define DHD_WAKE_SPIN_UNLOCK(lock, flags)	osl_spin_unlock((lock), (flags))
3740*4882a593Smuzhiyun 
3741*4882a593Smuzhiyun /*
3742*4882a593Smuzhiyun  * Temporarily change log dump lock to spin_lock_irqsave as DHD_ERROR/DHD_LOG_MEM
3743*4882a593Smuzhiyun  * are being called from dhdpcie_bus_isr.
3744*4882a593Smuzhiyun  * This will be reverted after a proper solution is implemented to handle ISR prints.
3745*4882a593Smuzhiyun  */
3746*4882a593Smuzhiyun #define DHD_LOG_DUMP_BUF_LOCK(lock, flags)	(flags) = osl_spin_lock_irq(lock)
3747*4882a593Smuzhiyun #define DHD_LOG_DUMP_BUF_UNLOCK(lock, flags)	osl_spin_unlock_irq((lock), (flags))
3748*4882a593Smuzhiyun 
3749*4882a593Smuzhiyun #define DHD_PKT_WAKE_LOCK(lock, flags)	(flags) = osl_spin_lock(lock)
3750*4882a593Smuzhiyun #define DHD_PKT_WAKE_UNLOCK(lock, flags)	osl_spin_unlock((lock), (flags))
3751*4882a593Smuzhiyun 
3752*4882a593Smuzhiyun #define DHD_OOB_IRQ_LOCK(lock, flags)	(flags) = osl_spin_lock(lock)
3753*4882a593Smuzhiyun #define DHD_OOB_IRQ_UNLOCK(lock, flags)	osl_spin_unlock((lock), (flags))
3754*4882a593Smuzhiyun 
3755*4882a593Smuzhiyun #define DHD_IF_STA_LIST_LOCK(lock, flags)	(flags) = osl_spin_lock(lock)
3756*4882a593Smuzhiyun #define DHD_IF_STA_LIST_UNLOCK(lock, flags)	osl_spin_unlock((lock), (flags))
3757*4882a593Smuzhiyun 
3758*4882a593Smuzhiyun #define DHD_DBG_RING_LOCK_INIT(osh)		osl_spin_lock_init(osh)
3759*4882a593Smuzhiyun #define DHD_DBG_RING_LOCK_DEINIT(osh, lock)	osl_spin_lock_deinit(osh, (lock))
3760*4882a593Smuzhiyun #define DHD_DBG_RING_LOCK(lock, flags)		(flags) = osl_spin_lock(lock)
3761*4882a593Smuzhiyun #define DHD_DBG_RING_UNLOCK(lock, flags)	osl_spin_unlock((lock), flags)
3762*4882a593Smuzhiyun 
3763*4882a593Smuzhiyun #ifdef DHD_MEM_STATS
3764*4882a593Smuzhiyun /* memory stats lock/unlock */
3765*4882a593Smuzhiyun #define DHD_MEM_STATS_LOCK(lock, flags)     (flags) = osl_spin_lock(lock)
3766*4882a593Smuzhiyun #define DHD_MEM_STATS_UNLOCK(lock, flags)   osl_spin_unlock((lock), (flags))
3767*4882a593Smuzhiyun #endif /* DHD_MEM_STATS */
3768*4882a593Smuzhiyun 
3769*4882a593Smuzhiyun extern void dhd_dump_to_kernelog(dhd_pub_t *dhdp);
3770*4882a593Smuzhiyun 
3771*4882a593Smuzhiyun #if defined(LINUX) || defined(linux)
3772*4882a593Smuzhiyun extern void dhd_print_tasklet_status(dhd_pub_t *dhd);
3773*4882a593Smuzhiyun #ifdef PCIE_INB_DW
3774*4882a593Smuzhiyun extern bool dhd_check_cfg_in_progress(dhd_pub_t *dhdp);
3775*4882a593Smuzhiyun #endif
3776*4882a593Smuzhiyun #else
3777*4882a593Smuzhiyun static INLINE void dhd_print_tasklet_status(dhd_pub_t *dhd) { }
3778*4882a593Smuzhiyun static INLINE bool dhd_check_cfg_in_progress(dhd_pub_t *dhdp)
3779*4882a593Smuzhiyun { return FALSE; }
3780*4882a593Smuzhiyun #endif /* LINUX | linux */
3781*4882a593Smuzhiyun 
3782*4882a593Smuzhiyun #ifdef BCMDBUS
3783*4882a593Smuzhiyun extern uint dhd_get_rxsz(dhd_pub_t *pub);
3784*4882a593Smuzhiyun extern void dhd_set_path(dhd_pub_t *pub);
3785*4882a593Smuzhiyun extern void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
3786*4882a593Smuzhiyun extern void dhd_bus_clearcounts(dhd_pub_t *dhdp);
3787*4882a593Smuzhiyun #endif /* BCMDBUS */
3788*4882a593Smuzhiyun 
3789*4882a593Smuzhiyun #ifdef DHD_L2_FILTER
3790*4882a593Smuzhiyun extern int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx);
3791*4882a593Smuzhiyun extern int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val);
3792*4882a593Smuzhiyun extern int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx);
3793*4882a593Smuzhiyun extern int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val);
3794*4882a593Smuzhiyun extern int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx);
3795*4882a593Smuzhiyun extern int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val);
3796*4882a593Smuzhiyun extern int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx);
3797*4882a593Smuzhiyun extern int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val);
3798*4882a593Smuzhiyun extern int dhd_get_block_tdls_status(dhd_pub_t *dhdp, uint32 idx);
3799*4882a593Smuzhiyun extern int dhd_set_block_tdls_status(dhd_pub_t *dhdp, uint32 idx, int val);
3800*4882a593Smuzhiyun #endif /* DHD_L2_FILTER */
3801*4882a593Smuzhiyun 
3802*4882a593Smuzhiyun #if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
3803*4882a593Smuzhiyun extern int dhd_set_qosmap_up_table(dhd_pub_t *dhdp, uint32 idx, bcm_tlv_t *qos_map_ie);
3804*4882a593Smuzhiyun #endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
3805*4882a593Smuzhiyun 
3806*4882a593Smuzhiyun typedef struct wl_io_pport {
3807*4882a593Smuzhiyun 	dhd_pub_t *dhd_pub;
3808*4882a593Smuzhiyun 	uint ifidx;
3809*4882a593Smuzhiyun } wl_io_pport_t;
3810*4882a593Smuzhiyun 
3811*4882a593Smuzhiyun typedef struct wl_evt_pport {
3812*4882a593Smuzhiyun 	dhd_pub_t *dhd_pub;
3813*4882a593Smuzhiyun 	int *ifidx;
3814*4882a593Smuzhiyun 	void *pktdata;
3815*4882a593Smuzhiyun 	uint data_len;
3816*4882a593Smuzhiyun 	void **data_ptr;
3817*4882a593Smuzhiyun 	void *raw_event;
3818*4882a593Smuzhiyun } wl_evt_pport_t;
3819*4882a593Smuzhiyun 
3820*4882a593Smuzhiyun extern void *dhd_pub_shim(dhd_pub_t *dhd_pub);
3821*4882a593Smuzhiyun #ifdef DHD_FW_COREDUMP
3822*4882a593Smuzhiyun void* dhd_get_fwdump_buf(dhd_pub_t *dhd_pub, uint32 length);
3823*4882a593Smuzhiyun #endif /* DHD_FW_COREDUMP */
3824*4882a593Smuzhiyun 
3825*4882a593Smuzhiyun #if defined(SET_XPS_CPUS)
3826*4882a593Smuzhiyun int dhd_xps_cpus_enable(struct net_device *net, int enable);
3827*4882a593Smuzhiyun int custom_xps_map_set(struct net_device *net, char *buf, size_t len);
3828*4882a593Smuzhiyun void custom_xps_map_clear(struct net_device *net);
3829*4882a593Smuzhiyun #endif
3830*4882a593Smuzhiyun 
3831*4882a593Smuzhiyun #if defined(SET_RPS_CPUS)
3832*4882a593Smuzhiyun int dhd_rps_cpus_enable(struct net_device *net, int enable);
3833*4882a593Smuzhiyun int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len);
3834*4882a593Smuzhiyun void custom_rps_map_clear(struct netdev_rx_queue *queue);
3835*4882a593Smuzhiyun #define PRIMARY_INF 0
3836*4882a593Smuzhiyun #define VIRTUAL_INF 1
3837*4882a593Smuzhiyun #if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890)
3838*4882a593Smuzhiyun #define RPS_CPUS_MASK "10"
3839*4882a593Smuzhiyun #define RPS_CPUS_MASK_P2P "10"
3840*4882a593Smuzhiyun #define RPS_CPUS_MASK_IBSS "10"
3841*4882a593Smuzhiyun #define RPS_CPUS_WLAN_CORE_ID 4
3842*4882a593Smuzhiyun #else
3843*4882a593Smuzhiyun #if defined(DHD_TPUT_PATCH)
3844*4882a593Smuzhiyun #define RPS_CPUS_MASK "f"
3845*4882a593Smuzhiyun #define RPS_CPUS_MASK_P2P "f"
3846*4882a593Smuzhiyun #define RPS_CPUS_MASK_IBSS "f"
3847*4882a593Smuzhiyun #else
3848*4882a593Smuzhiyun #define RPS_CPUS_MASK "6"
3849*4882a593Smuzhiyun #define RPS_CPUS_MASK_P2P "6"
3850*4882a593Smuzhiyun #define RPS_CPUS_MASK_IBSS "6"
3851*4882a593Smuzhiyun #endif
3852*4882a593Smuzhiyun #endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 */
3853*4882a593Smuzhiyun #endif /* SET_RPS_CPUS */
3854*4882a593Smuzhiyun 
3855*4882a593Smuzhiyun int dhd_get_download_buffer(dhd_pub_t	*dhd, char *file_path, download_type_t component,
3856*4882a593Smuzhiyun 	char ** buffer, int *length);
3857*4882a593Smuzhiyun 
3858*4882a593Smuzhiyun void dhd_free_download_buffer(dhd_pub_t	*dhd, void *buffer, int length);
3859*4882a593Smuzhiyun 
3860*4882a593Smuzhiyun int dhd_download_blob(dhd_pub_t *dhd, unsigned char *buf,
3861*4882a593Smuzhiyun 		uint32 len, char *iovar);
3862*4882a593Smuzhiyun 
3863*4882a593Smuzhiyun int dhd_download_blob_cached(dhd_pub_t *dhd, char *file_path,
3864*4882a593Smuzhiyun 	uint32 len, char *iovar);
3865*4882a593Smuzhiyun 
3866*4882a593Smuzhiyun int dhd_apply_default_txcap(dhd_pub_t *dhd, char *txcap_path);
3867*4882a593Smuzhiyun int dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path);
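
/*
 * Usage sketch (illustrative only; dhd_example_download_component() is not part
 * of the driver API): the typical pairing of the download-buffer helpers above
 * with dhd_download_blob(). The component and iovar name (e.g. "clmload") are
 * chosen by the caller; nothing is assumed here beyond the prototypes above.
 */
static INLINE int
dhd_example_download_component(dhd_pub_t *dhd, char *file_path,
	download_type_t component, char *iovar)
{
	char *buf = NULL;
	int len = 0;
	int err;

	/* Read the image file into a driver-managed staging buffer */
	err = dhd_get_download_buffer(dhd, file_path, component, &buf, &len);
	if (err != 0) {
		return err;
	}

	/* Push the image to the dongle through the named iovar */
	err = dhd_download_blob(dhd, (unsigned char *)buf, (uint32)len, iovar);

	/* The staging buffer is released whether the download succeeded or not */
	dhd_free_download_buffer(dhd, buf, len);
	return err;
}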
3868*4882a593Smuzhiyun 
3869*4882a593Smuzhiyun #ifdef SHOW_LOGTRACE
3870*4882a593Smuzhiyun int dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size,
3871*4882a593Smuzhiyun 		dhd_event_log_t *event_log);
3872*4882a593Smuzhiyun int dhd_parse_map_file(osl_t *osh, void *file, uint32 *ramstart,
3873*4882a593Smuzhiyun 		uint32 *rodata_start, uint32 *rodata_end);
3874*4882a593Smuzhiyun #ifdef PCIE_FULL_DONGLE
3875*4882a593Smuzhiyun int dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t *dhdp, void *pktbuf,
3876*4882a593Smuzhiyun 		dhd_event_log_t *event_data);
3877*4882a593Smuzhiyun #endif /* PCIE_FULL_DONGLE */
3878*4882a593Smuzhiyun #endif /* SHOW_LOGTRACE */
3879*4882a593Smuzhiyun 
3880*4882a593Smuzhiyun /*
3881*4882a593Smuzhiyun  * control_logtrace:
3882*4882a593Smuzhiyun  * "0" -> do not print event log messages in any form
3883*4882a593Smuzhiyun  * "1" -> print event log messages as EL
3884*4882a593Smuzhiyun  * "2" -> print event log messages as formatted CONSOLE_E if logstrs.bin etc. files are available
3885*4882a593Smuzhiyun  */
3886*4882a593Smuzhiyun typedef enum logtrace_ctrl {
3887*4882a593Smuzhiyun 	LOGTRACE_DISABLE = 0,
3888*4882a593Smuzhiyun 	LOGTRACE_RAW_FMT = 1,
3889*4882a593Smuzhiyun 	LOGTRACE_PARSED_FMT = 2
3890*4882a593Smuzhiyun } logtrace_ctrl_t;
3891*4882a593Smuzhiyun 
3892*4882a593Smuzhiyun #define DEFAULT_CONTROL_LOGTRACE	LOGTRACE_PARSED_FMT
3893*4882a593Smuzhiyun #ifndef CUSTOM_CONTROL_LOGTRACE
3894*4882a593Smuzhiyun #define CUSTOM_CONTROL_LOGTRACE		DEFAULT_CONTROL_LOGTRACE
3895*4882a593Smuzhiyun #endif
3896*4882a593Smuzhiyun 
3897*4882a593Smuzhiyun extern uint8 control_logtrace;
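
/*
 * Minimal usage sketch (illustrative only; not part of the driver API):
 * consumers can branch on control_logtrace using the logtrace_ctrl_t values
 * above; parsed output is only meaningful once logstrs.bin has been loaded.
 */
static INLINE bool
dhd_example_logtrace_parsed_enabled(void)
{
	return (control_logtrace == LOGTRACE_PARSED_FMT);
}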
3898*4882a593Smuzhiyun 
3899*4882a593Smuzhiyun #ifdef BTLOG
3900*4882a593Smuzhiyun int dhd_bt_log_pkt_process(dhd_pub_t *dhdp, void *pktbuf);
3901*4882a593Smuzhiyun #endif	/* BTLOG */
3902*4882a593Smuzhiyun 
3903*4882a593Smuzhiyun #if defined(NDIS)
3904*4882a593Smuzhiyun bool dhd_is_device_removed(dhd_pub_t *dhd);
3905*4882a593Smuzhiyun #else
3906*4882a593Smuzhiyun #define dhd_is_device_removed(x) FALSE
3907*4882a593Smuzhiyun #define dhd_os_ind_firmware_stall(x)
3908*4882a593Smuzhiyun #endif /* defined(NDIS) */
3909*4882a593Smuzhiyun 
3910*4882a593Smuzhiyun #if defined(DHD_FW_COREDUMP)
3911*4882a593Smuzhiyun #if defined(linux) || defined(LINUX)
3912*4882a593Smuzhiyun extern void dhd_get_memdump_info(dhd_pub_t *dhd);
3913*4882a593Smuzhiyun #else
3914*4882a593Smuzhiyun static INLINE void dhd_get_memdump_info(dhd_pub_t *dhd)
3915*4882a593Smuzhiyun { return; }
3916*4882a593Smuzhiyun #endif /* linux || LINUX */
3917*4882a593Smuzhiyun #endif /* defined(DHD_FW_COREDUMP) */
3918*4882a593Smuzhiyun #ifdef BCMASSERT_LOG
3919*4882a593Smuzhiyun extern void dhd_get_assert_info(dhd_pub_t *dhd);
3920*4882a593Smuzhiyun #else
3921*4882a593Smuzhiyun static INLINE void dhd_get_assert_info(dhd_pub_t *dhd) { }
3922*4882a593Smuzhiyun #endif /* BCMASSERT_LOG */
3923*4882a593Smuzhiyun 
3924*4882a593Smuzhiyun #if defined(LINUX) || defined(linux)
3925*4882a593Smuzhiyun #define DMAXFER_FREE(dhdp, dmap) dhd_schedule_dmaxfer_free(dhdp, dmap);
3926*4882a593Smuzhiyun #else  /* !(LINUX || linux) */
3927*4882a593Smuzhiyun #define DMAXFER_FREE(dhdp, dmmap) dmaxfer_free_prev_dmaaddr(dhdp, dmmap);
3928*4882a593Smuzhiyun #endif  /* linux || LINUX */
3929*4882a593Smuzhiyun 
3930*4882a593Smuzhiyun #if defined(PCIE_FULL_DONGLE)
3931*4882a593Smuzhiyun extern void dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap);
3932*4882a593Smuzhiyun void dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap);
3933*4882a593Smuzhiyun #endif  /* PCIE_FULL_DONGLE */
3934*4882a593Smuzhiyun 
3935*4882a593Smuzhiyun #define DHD_LB_STATS_NOOP	do { /* noop */ } while (0)
3936*4882a593Smuzhiyun #if defined(DHD_LB_STATS)
3937*4882a593Smuzhiyun #include <bcmutils.h>
3938*4882a593Smuzhiyun extern void dhd_lb_stats_init(dhd_pub_t *dhd);
3939*4882a593Smuzhiyun extern void dhd_lb_stats_deinit(dhd_pub_t *dhd);
3940*4882a593Smuzhiyun extern void dhd_lb_stats_reset(dhd_pub_t *dhd);
3941*4882a593Smuzhiyun #ifdef DHD_MEM_STATS
3942*4882a593Smuzhiyun extern uint64 dhd_lb_mem_usage(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
3943*4882a593Smuzhiyun #endif /* DHD_MEM_STATS */
3944*4882a593Smuzhiyun extern void dhd_lb_stats_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
3945*4882a593Smuzhiyun extern void dhd_lb_stats_update_napi_histo(dhd_pub_t *dhdp, uint32 count);
3946*4882a593Smuzhiyun extern void dhd_lb_stats_update_txc_histo(dhd_pub_t *dhdp, uint32 count);
3947*4882a593Smuzhiyun extern void dhd_lb_stats_update_rxc_histo(dhd_pub_t *dhdp, uint32 count);
3948*4882a593Smuzhiyun extern void dhd_lb_stats_txc_percpu_cnt_incr(dhd_pub_t *dhdp);
3949*4882a593Smuzhiyun extern void dhd_lb_stats_rxc_percpu_cnt_incr(dhd_pub_t *dhdp);
3950*4882a593Smuzhiyun #define DHD_LB_STATS_INIT(dhdp)	dhd_lb_stats_init(dhdp)
3951*4882a593Smuzhiyun #define DHD_LB_STATS_DEINIT(dhdp) dhd_lb_stats_deinit(dhdp)
3952*4882a593Smuzhiyun /* Reset is called from common layer so it takes dhd_pub_t as argument */
3953*4882a593Smuzhiyun #define DHD_LB_STATS_RESET(dhdp) dhd_lb_stats_reset(dhdp)
3954*4882a593Smuzhiyun #define DHD_LB_STATS_CLR(x)	(x) = 0U
3955*4882a593Smuzhiyun #define DHD_LB_STATS_INCR(x)	(x) = (x) + 1
3956*4882a593Smuzhiyun #define DHD_LB_STATS_ADD(x, c)	(x) = (x) + (c)
3957*4882a593Smuzhiyun #define DHD_LB_STATS_PERCPU_ARR_INCR(x) \
3958*4882a593Smuzhiyun 	{ \
3959*4882a593Smuzhiyun 		int cpu = get_cpu(); put_cpu(); \
3960*4882a593Smuzhiyun 		DHD_LB_STATS_INCR(x[cpu]); \
3961*4882a593Smuzhiyun 	}
3962*4882a593Smuzhiyun #define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhdp, x)	dhd_lb_stats_update_napi_histo(dhdp, x)
3963*4882a593Smuzhiyun #else /* !DHD_LB_STATS */
3964*4882a593Smuzhiyun #define DHD_LB_STATS_INIT(dhdp)	 DHD_LB_STATS_NOOP
3965*4882a593Smuzhiyun #define DHD_LB_STATS_DEINIT(dhdp) DHD_LB_STATS_NOOP
3966*4882a593Smuzhiyun #define DHD_LB_STATS_RESET(dhdp) DHD_LB_STATS_NOOP
3967*4882a593Smuzhiyun #define DHD_LB_STATS_CLR(x)	 DHD_LB_STATS_NOOP
3968*4882a593Smuzhiyun #define DHD_LB_STATS_INCR(x)	 DHD_LB_STATS_NOOP
3969*4882a593Smuzhiyun #define DHD_LB_STATS_ADD(x, c)	 DHD_LB_STATS_NOOP
3970*4882a593Smuzhiyun #define DHD_LB_STATS_PERCPU_ARR_INCR(x)	 DHD_LB_STATS_NOOP
3971*4882a593Smuzhiyun #define DHD_LB_STATS_UPDATE_NAPI_HISTO(dhd, x) DHD_LB_STATS_NOOP
3972*4882a593Smuzhiyun #endif /* !DHD_LB_STATS */
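
/*
 * Usage sketch (illustrative only; not part of the driver API): the
 * DHD_LB_STATS_* macros may be placed unconditionally in hot paths since they
 * compile away when DHD_LB_STATS is not defined. The local counter below is a
 * stand-in for a real per-path statistic.
 */
static INLINE uint32
dhd_example_lb_stats_usage(dhd_pub_t *dhdp)
{
	uint32 napi_jobs = 0;

	(void)dhdp;	/* only consumed by the macros when DHD_LB_STATS is defined */
	DHD_LB_STATS_INCR(napi_jobs);			/* count one dispatch */
	DHD_LB_STATS_ADD(napi_jobs, 4);			/* account a burst of four */
	DHD_LB_STATS_UPDATE_NAPI_HISTO(dhdp, napi_jobs);	/* fold into the NAPI histogram */
	return napi_jobs;	/* 5 when stats are enabled, 0 when compiled out */
}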
3973*4882a593Smuzhiyun 
3974*4882a593Smuzhiyun #ifdef BCMDBG
3975*4882a593Smuzhiyun extern void dhd_schedule_macdbg_dump(dhd_pub_t *dhdp);
3976*4882a593Smuzhiyun #endif /* BCMDBG */
3977*4882a593Smuzhiyun 
3978*4882a593Smuzhiyun #ifdef DHD_SSSR_DUMP
3979*4882a593Smuzhiyun #ifdef DHD_SSSR_DUMP_BEFORE_SR
3980*4882a593Smuzhiyun #define DHD_SSSR_MEMPOOL_SIZE	(2 * 1024 * 1024) /* 2MB size */
3981*4882a593Smuzhiyun #else
3982*4882a593Smuzhiyun #define DHD_SSSR_MEMPOOL_SIZE	(1 * 1024 * 1024) /* 1MB size */
3983*4882a593Smuzhiyun #endif /* DHD_SSSR_DUMP_BEFORE_SR */
3984*4882a593Smuzhiyun 
3985*4882a593Smuzhiyun /* used in sssr_dump_mode */
3986*4882a593Smuzhiyun #define SSSR_DUMP_MODE_SSSR	0	/* dump both *before* and *after* files */
3987*4882a593Smuzhiyun #define SSSR_DUMP_MODE_FIS	1	/* dump *after* files only */
3988*4882a593Smuzhiyun 
3989*4882a593Smuzhiyun extern int dhd_sssr_mempool_init(dhd_pub_t *dhd);
3990*4882a593Smuzhiyun extern void dhd_sssr_mempool_deinit(dhd_pub_t *dhd);
3991*4882a593Smuzhiyun extern int dhd_sssr_dump_init(dhd_pub_t *dhd);
3992*4882a593Smuzhiyun extern void dhd_sssr_dump_deinit(dhd_pub_t *dhd);
3993*4882a593Smuzhiyun extern int dhdpcie_sssr_dump(dhd_pub_t *dhd);
3994*4882a593Smuzhiyun extern void dhd_sssr_print_filepath(dhd_pub_t *dhd, char *path);
3995*4882a593Smuzhiyun extern int dhd_sssr_reg_info_init(dhd_pub_t *dhd);
3996*4882a593Smuzhiyun extern void dhd_sssr_reg_info_deinit(dhd_pub_t *dhd);
3997*4882a593Smuzhiyun extern uint dhd_sssr_dig_buf_size(dhd_pub_t *dhdp);
3998*4882a593Smuzhiyun extern uint dhd_sssr_dig_buf_addr(dhd_pub_t *dhdp);
3999*4882a593Smuzhiyun extern uint dhd_sssr_mac_buf_size(dhd_pub_t *dhdp, uint8 core_idx);
4000*4882a593Smuzhiyun extern uint dhd_sssr_mac_xmtaddress(dhd_pub_t *dhdp, uint8 core_idx);
4001*4882a593Smuzhiyun extern uint dhd_sssr_mac_xmtdata(dhd_pub_t *dhdp, uint8 core_idx);
4002*4882a593Smuzhiyun 
4003*4882a593Smuzhiyun #define DHD_SSSR_MEMPOOL_INIT(dhdp)	dhd_sssr_mempool_init(dhdp)
4004*4882a593Smuzhiyun #define DHD_SSSR_MEMPOOL_DEINIT(dhdp) dhd_sssr_mempool_deinit(dhdp)
4005*4882a593Smuzhiyun #define DHD_SSSR_DUMP_INIT(dhdp)	dhd_sssr_dump_init(dhdp)
4006*4882a593Smuzhiyun #define DHD_SSSR_DUMP_DEINIT(dhdp) dhd_sssr_dump_deinit(dhdp)
4007*4882a593Smuzhiyun #define DHD_SSSR_PRINT_FILEPATH(dhdp, path) dhd_sssr_print_filepath(dhdp, path)
4008*4882a593Smuzhiyun #define DHD_SSSR_REG_INFO_INIT(dhdp)	dhd_sssr_reg_info_init(dhdp)
4009*4882a593Smuzhiyun #define DHD_SSSR_REG_INFO_DEINIT(dhdp) dhd_sssr_reg_info_deinit(dhdp)
4010*4882a593Smuzhiyun #else
4011*4882a593Smuzhiyun #define DHD_SSSR_MEMPOOL_INIT(dhdp)		do { /* noop */ } while (0)
4012*4882a593Smuzhiyun #define DHD_SSSR_MEMPOOL_DEINIT(dhdp)		do { /* noop */ } while (0)
4013*4882a593Smuzhiyun #define DHD_SSSR_DUMP_INIT(dhdp)		do { /* noop */ } while (0)
4014*4882a593Smuzhiyun #define DHD_SSSR_DUMP_DEINIT(dhdp)		do { /* noop */ } while (0)
4015*4882a593Smuzhiyun #define DHD_SSSR_PRINT_FILEPATH(dhdp, path)	do { /* noop */ } while (0)
4016*4882a593Smuzhiyun #define DHD_SSSR_REG_INFO_INIT(dhdp)		do { /* noop */ } while (0)
4017*4882a593Smuzhiyun #define DHD_SSSR_REG_INFO_DEINIT(dhdp)		do { /* noop */ } while (0)
4018*4882a593Smuzhiyun #endif /* DHD_SSSR_DUMP */
4019*4882a593Smuzhiyun 
4020*4882a593Smuzhiyun #ifdef BCMPCIE
4021*4882a593Smuzhiyun extern int dhd_prot_debug_info_print(dhd_pub_t *dhd);
4022*4882a593Smuzhiyun extern bool dhd_bus_skip_clm(dhd_pub_t *dhdp);
4023*4882a593Smuzhiyun extern void dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd);
4024*4882a593Smuzhiyun extern bool dhd_pcie_dump_int_regs(dhd_pub_t *dhd);
4025*4882a593Smuzhiyun #else
4026*4882a593Smuzhiyun #define dhd_prot_debug_info_print(x)
4027*4882a593Smuzhiyun static INLINE bool dhd_bus_skip_clm(dhd_pub_t *dhd_pub)
4028*4882a593Smuzhiyun { return 0; }
4029*4882a593Smuzhiyun #endif /* BCMPCIE */
4030*4882a593Smuzhiyun 
4031*4882a593Smuzhiyun #if defined(LINUX) || defined(linux)
4032*4882a593Smuzhiyun void dhd_show_kirqstats(dhd_pub_t *dhd);
4033*4882a593Smuzhiyun #else
4034*4882a593Smuzhiyun static INLINE void dhd_show_kirqstats(dhd_pub_t *dhd)
4035*4882a593Smuzhiyun { return; }
4036*4882a593Smuzhiyun #endif /* defined(LINUX) || defined(linux) */
4037*4882a593Smuzhiyun 
4038*4882a593Smuzhiyun /* Bitmask used for Join Timeout */
4039*4882a593Smuzhiyun #define WLC_SSID_MASK          0x01
4040*4882a593Smuzhiyun #define WLC_WPA_MASK           0x02
4041*4882a593Smuzhiyun 
4042*4882a593Smuzhiyun #if defined(LINUX) || defined(linux) || defined(DHD_EFI)
4043*4882a593Smuzhiyun fw_download_status_t dhd_fw_download_status(dhd_pub_t *dhd_pub);
4044*4882a593Smuzhiyun extern int dhd_start_join_timer(dhd_pub_t *pub);
4045*4882a593Smuzhiyun extern int dhd_stop_join_timer(dhd_pub_t *pub);
4046*4882a593Smuzhiyun extern int dhd_start_scan_timer(dhd_pub_t *pub, bool is_escan);
4047*4882a593Smuzhiyun extern int dhd_stop_scan_timer(dhd_pub_t *pub, bool is_escan, uint16 sync_id);
4048*4882a593Smuzhiyun extern int dhd_start_cmd_timer(dhd_pub_t *pub);
4049*4882a593Smuzhiyun extern int dhd_stop_cmd_timer(dhd_pub_t *pub);
4050*4882a593Smuzhiyun extern int dhd_start_bus_timer(dhd_pub_t *pub);
4051*4882a593Smuzhiyun extern int dhd_stop_bus_timer(dhd_pub_t *pub);
4052*4882a593Smuzhiyun extern uint16 dhd_get_request_id(dhd_pub_t *pub);
4053*4882a593Smuzhiyun extern int dhd_set_request_id(dhd_pub_t *pub, uint16 id, uint32 cmd);
4054*4882a593Smuzhiyun extern void dhd_clear_join_error(dhd_pub_t *pub, uint32 mask);
4055*4882a593Smuzhiyun extern void dhd_get_scan_to_val(dhd_pub_t *pub, uint32 *to_val);
4056*4882a593Smuzhiyun extern void dhd_set_scan_to_val(dhd_pub_t *pub, uint32 to_val);
4057*4882a593Smuzhiyun extern void dhd_get_join_to_val(dhd_pub_t *pub, uint32 *to_val);
4058*4882a593Smuzhiyun extern void dhd_set_join_to_val(dhd_pub_t *pub, uint32 to_val);
4059*4882a593Smuzhiyun extern void dhd_get_cmd_to_val(dhd_pub_t *pub, uint32 *to_val);
4060*4882a593Smuzhiyun extern void dhd_set_cmd_to_val(dhd_pub_t *pub, uint32 to_val);
4061*4882a593Smuzhiyun extern void dhd_get_bus_to_val(dhd_pub_t *pub, uint32 *to_val);
4062*4882a593Smuzhiyun extern void dhd_set_bus_to_val(dhd_pub_t *pub, uint32 to_val);
4063*4882a593Smuzhiyun extern int dhd_start_timesync_timer(dhd_pub_t *pub);
4064*4882a593Smuzhiyun extern int dhd_stop_timesync_timer(dhd_pub_t *pub);
4065*4882a593Smuzhiyun #else
4066*4882a593Smuzhiyun static INLINE fw_download_status_t dhd_fw_download_status(dhd_pub_t *dhd_pub)
4067*4882a593Smuzhiyun { return FW_UNLOADED; }
4068*4882a593Smuzhiyun static INLINE int dhd_start_join_timer(dhd_pub_t *pub) { return 0; }
4069*4882a593Smuzhiyun static INLINE int dhd_stop_join_timer(dhd_pub_t *pub) { return 0; }
4070*4882a593Smuzhiyun static INLINE int dhd_start_scan_timer(dhd_pub_t *pub, bool is_escan) { return 0; }
4071*4882a593Smuzhiyun static INLINE int dhd_stop_scan_timer(dhd_pub_t *pub, bool is_escan, uint16 sync_id) { return 0; }
4072*4882a593Smuzhiyun static INLINE int dhd_start_cmd_timer(dhd_pub_t *pub) { return 0; }
4073*4882a593Smuzhiyun static INLINE int dhd_stop_cmd_timer(dhd_pub_t *pub) { return 0; }
4074*4882a593Smuzhiyun static INLINE int dhd_start_bus_timer(dhd_pub_t *pub) { return 0; }
4075*4882a593Smuzhiyun static INLINE int dhd_stop_bus_timer(dhd_pub_t *pub) { return 0; }
4076*4882a593Smuzhiyun static INLINE uint16 dhd_get_request_id(dhd_pub_t *pub) { return 0; }
4077*4882a593Smuzhiyun static INLINE int dhd_set_request_id(dhd_pub_t *pub, uint16 id, uint32 cmd) { return 0; }
4078*4882a593Smuzhiyun static INLINE void dhd_clear_join_error(dhd_pub_t *pub, uint32 mask) { return; }
4079*4882a593Smuzhiyun static INLINE void dhd_get_scan_to_val(dhd_pub_t *pub, uint32 *to_val) { return; }
4080*4882a593Smuzhiyun static INLINE void dhd_set_scan_to_val(dhd_pub_t *pub, uint32 to_val) { return; }
4081*4882a593Smuzhiyun static INLINE void dhd_get_join_to_val(dhd_pub_t *pub, uint32 *to_val) { return; }
4082*4882a593Smuzhiyun static INLINE void dhd_set_join_to_val(dhd_pub_t *pub, uint32 to_val) { return; }
4083*4882a593Smuzhiyun static INLINE void dhd_get_cmd_to_val(dhd_pub_t *pub, uint32 *to_val) { return; }
4084*4882a593Smuzhiyun static INLINE void dhd_set_cmd_to_val(dhd_pub_t *pub, uint32 to_val) { return; }
4085*4882a593Smuzhiyun static INLINE void dhd_get_bus_to_val(dhd_pub_t *pub, uint32 *to_val) { return; }
4086*4882a593Smuzhiyun static INLINE void dhd_set_bus_to_val(dhd_pub_t *pub, uint32 to_val) { return; }
4087*4882a593Smuzhiyun static INLINE int dhd_start_timesync_timer(dhd_pub_t *pub) { return 0; }
4088*4882a593Smuzhiyun static INLINE int dhd_stop_timesync_timer(dhd_pub_t *pub) { return 0; }
4089*4882a593Smuzhiyun #endif /* LINUX || linux || DHD_EFI */
4090*4882a593Smuzhiyun 
4091*4882a593Smuzhiyun #ifdef DHD_PKTID_AUDIT_ENABLED
4092*4882a593Smuzhiyun #if defined(LINUX) || defined(linux)
4093*4882a593Smuzhiyun void dhd_pktid_error_handler(dhd_pub_t *dhdp);
4094*4882a593Smuzhiyun #else /* !(LINUX || linux) */
4095*4882a593Smuzhiyun static INLINE void dhd_pktid_error_handler(dhd_pub_t *dhdp) { ASSERT(0); }
4096*4882a593Smuzhiyun #endif /* LINUX || linux */
4097*4882a593Smuzhiyun #endif /* DHD_PKTID_AUDIT_ENABLED */
4098*4882a593Smuzhiyun 
4099*4882a593Smuzhiyun #ifdef DHD_MAP_PKTID_LOGGING
4100*4882a593Smuzhiyun #if defined(LINUX) || defined(linux)
4101*4882a593Smuzhiyun extern void dhd_pktid_logging_dump(dhd_pub_t *dhdp);
4102*4882a593Smuzhiyun #else /* !(LINUX || linux) */
4103*4882a593Smuzhiyun static INLINE void dhd_pktid_logging_dump(dhd_pub_t *dhdp) { }
4104*4882a593Smuzhiyun #endif /* LINUX || linux */
4105*4882a593Smuzhiyun #endif /* DHD_MAP_PKTID_LOGGING */
4106*4882a593Smuzhiyun 
4107*4882a593Smuzhiyun #ifdef DHD_PCIE_RUNTIMEPM
4108*4882a593Smuzhiyun #define DEFAULT_DHD_RUNTIME_MS 100
4109*4882a593Smuzhiyun #ifndef CUSTOM_DHD_RUNTIME_MS
4110*4882a593Smuzhiyun #define CUSTOM_DHD_RUNTIME_MS DEFAULT_DHD_RUNTIME_MS
4111*4882a593Smuzhiyun #endif /* CUSTOM_DHD_RUNTIME_MS */
4112*4882a593Smuzhiyun 
4113*4882a593Smuzhiyun #ifndef MAX_IDLE_COUNT
4114*4882a593Smuzhiyun #define MAX_IDLE_COUNT 11
4115*4882a593Smuzhiyun #endif /* MAX_IDLE_COUNT */
4116*4882a593Smuzhiyun 
4117*4882a593Smuzhiyun extern bool dhd_runtimepm_state(dhd_pub_t *dhd);
4118*4882a593Smuzhiyun extern bool dhd_runtime_bus_wake(struct dhd_bus *bus, bool wait, void *func_addr);
4119*4882a593Smuzhiyun extern bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void *func_addr);
4120*4882a593Smuzhiyun extern void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp);
4121*4882a593Smuzhiyun extern bool dhdpcie_is_resume_done(dhd_pub_t *dhdp);
4122*4882a593Smuzhiyun extern void dhd_runtime_pm_disable(dhd_pub_t *dhdp);
4123*4882a593Smuzhiyun extern void dhd_runtime_pm_enable(dhd_pub_t *dhdp);
4124*4882a593Smuzhiyun /* Disable the Runtime PM thread and wake the bus if it is already suspended */
4125*4882a593Smuzhiyun #define DHD_DISABLE_RUNTIME_PM(dhdp) \
4126*4882a593Smuzhiyun do { \
4127*4882a593Smuzhiyun 	dhd_runtime_pm_disable(dhdp); \
4128*4882a593Smuzhiyun } while (0);
4129*4882a593Smuzhiyun 
4130*4882a593Smuzhiyun /* Enable the Runtime PM thread */
4131*4882a593Smuzhiyun #define DHD_ENABLE_RUNTIME_PM(dhdp) \
4132*4882a593Smuzhiyun do { \
4133*4882a593Smuzhiyun 	dhd_runtime_pm_enable(dhdp); \
4134*4882a593Smuzhiyun } while (0);
4135*4882a593Smuzhiyun 
4136*4882a593Smuzhiyun /* Stop the timer and disable RPM thread */
4137*4882a593Smuzhiyun #define DHD_STOP_RPM_TIMER(dhdp) \
4138*4882a593Smuzhiyun do { \
4139*4882a593Smuzhiyun 	dhd_os_runtimepm_timer(dhdp, 0); \
4140*4882a593Smuzhiyun 	DHD_DISABLE_RUNTIME_PM(dhdp) \
4141*4882a593Smuzhiyun } while (0);
4142*4882a593Smuzhiyun 
4143*4882a593Smuzhiyun /* Start the timer and enable RPM thread */
4144*4882a593Smuzhiyun #define DHD_START_RPM_TIMER(dhdp) \
4145*4882a593Smuzhiyun do { \
4146*4882a593Smuzhiyun 	dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms); \
4147*4882a593Smuzhiyun 	DHD_ENABLE_RUNTIME_PM(dhdp) \
4148*4882a593Smuzhiyun } while (0);
4149*4882a593Smuzhiyun #else
4150*4882a593Smuzhiyun #define DHD_DISABLE_RUNTIME_PM(dhdp)
4151*4882a593Smuzhiyun #define DHD_ENABLE_RUNTIME_PM(dhdp)
4152*4882a593Smuzhiyun #define DHD_STOP_RPM_TIMER(dhdp)
4153*4882a593Smuzhiyun #define DHD_START_RPM_TIMER(dhdp)
4154*4882a593Smuzhiyun #endif /* DHD_PCIE_RUNTIMEPM */
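
/*
 * Usage sketch (illustrative only, not part of the driver API): the macros
 * above are meant to be used in pairs around a window in which the bus must
 * not runtime-suspend:
 *
 *	DHD_STOP_RPM_TIMER(dhdp);	stop the timer and park the RPM thread
 *	... work that must not race with a runtime suspend ...
 *	DHD_START_RPM_TIMER(dhdp);	re-arm the timer and resume runtime PM
 *
 * Both macros compile to no-ops when DHD_PCIE_RUNTIMEPM is not defined, so
 * callers do not need #ifdef guards of their own.
 */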
4155*4882a593Smuzhiyun 
4156*4882a593Smuzhiyun extern bool dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info);
4157*4882a593Smuzhiyun extern void dhd_prot_dump_ring_ptrs(void *prot_info);
4158*4882a593Smuzhiyun 
4159*4882a593Smuzhiyun #if defined(LINUX) || defined(linux)
4160*4882a593Smuzhiyun #if defined(DHD_TRACE_WAKE_LOCK)
4161*4882a593Smuzhiyun void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp);
4162*4882a593Smuzhiyun #endif
4163*4882a593Smuzhiyun #endif /* LINUX || linux */
4164*4882a593Smuzhiyun 
4165*4882a593Smuzhiyun extern bool dhd_query_bus_erros(dhd_pub_t *dhdp);
4166*4882a593Smuzhiyun void dhd_clear_bus_errors(dhd_pub_t *dhdp);
4167*4882a593Smuzhiyun 
4168*4882a593Smuzhiyun #if (defined(linux) || defined(LINUX)) && defined(CONFIG_64BIT)
4169*4882a593Smuzhiyun #define DHD_SUPPORT_64BIT
4170*4882a593Smuzhiyun #elif defined(DHD_EFI)
4171*4882a593Smuzhiyun #define DHD_SUPPORT_64BIT
4172*4882a593Smuzhiyun /* 64-bit support is disabled by default on other platforms; define the appropriate macro to enable it */
4173*4882a593Smuzhiyun #endif /* (linux || LINUX) && CONFIG_64BIT */
4174*4882a593Smuzhiyun 
4175*4882a593Smuzhiyun #if defined(DHD_EFI) || defined(DHD_ERPOM)
4176*4882a593Smuzhiyun extern void dhd_schedule_reset(dhd_pub_t *dhdp);
4177*4882a593Smuzhiyun #else
4178*4882a593Smuzhiyun static INLINE void dhd_schedule_reset(dhd_pub_t *dhdp) {;}
4179*4882a593Smuzhiyun #endif /* DHD_EFI || DHD_ERPOM */
4180*4882a593Smuzhiyun 
4181*4882a593Smuzhiyun extern void init_dhd_timeouts(dhd_pub_t *pub);
4182*4882a593Smuzhiyun extern void deinit_dhd_timeouts(dhd_pub_t *pub);
4183*4882a593Smuzhiyun 
4184*4882a593Smuzhiyun typedef enum timeout_resons {
4185*4882a593Smuzhiyun 	DHD_REASON_COMMAND_TO,
4186*4882a593Smuzhiyun 	DHD_REASON_JOIN_TO,
4187*4882a593Smuzhiyun 	DHD_REASON_SCAN_TO,
4188*4882a593Smuzhiyun 	DHD_REASON_OQS_TO
4189*4882a593Smuzhiyun } timeout_reasons_t;
4190*4882a593Smuzhiyun 
4191*4882a593Smuzhiyun #ifdef REPORT_FATAL_TIMEOUTS
4192*4882a593Smuzhiyun void dhd_send_trap_to_fw_for_timeout(dhd_pub_t * pub, timeout_reasons_t reason);
4193*4882a593Smuzhiyun #endif
4194*4882a593Smuzhiyun #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
4195*4882a593Smuzhiyun extern int dhd_bus_set_device_wake(struct dhd_bus *bus, bool val);
4196*4882a593Smuzhiyun extern void dhd_bus_dw_deassert(dhd_pub_t *dhd);
4197*4882a593Smuzhiyun #endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
4198*4882a593Smuzhiyun extern void dhd_prhex(const char *msg, volatile uchar *buf, uint nbytes, uint8 dbg_level);
4199*4882a593Smuzhiyun int dhd_tput_test(dhd_pub_t *dhd, tput_test_t *tput_data);
4200*4882a593Smuzhiyun void dhd_tput_test_rx(dhd_pub_t *dhd, void *pkt);
4201*4882a593Smuzhiyun #ifdef DHD_EFI
4202*4882a593Smuzhiyun int dhd_get_max_txbufs(dhd_pub_t *dhdp);
4203*4882a593Smuzhiyun #else
4204*4882a593Smuzhiyun static INLINE int dhd_get_max_txbufs(dhd_pub_t *dhdp)
4205*4882a593Smuzhiyun { return -1; }
4206*4882a593Smuzhiyun #endif
4207*4882a593Smuzhiyun 
4208*4882a593Smuzhiyun #ifdef FILTER_IE
4209*4882a593Smuzhiyun int dhd_read_from_file(dhd_pub_t *dhd);
4210*4882a593Smuzhiyun int dhd_parse_filter_ie(dhd_pub_t *dhd, uint8 *buf);
4211*4882a593Smuzhiyun int dhd_get_filter_ie_count(dhd_pub_t *dhd, uint8 *buf);
4212*4882a593Smuzhiyun int dhd_parse_oui(dhd_pub_t *dhd, uint8 *inbuf, uint8 *oui, int len);
4213*4882a593Smuzhiyun int dhd_check_valid_ie(dhd_pub_t *dhdp, uint8 *buf, int len);
4214*4882a593Smuzhiyun #endif /* FILTER_IE */
4215*4882a593Smuzhiyun 
4216*4882a593Smuzhiyun uint16 dhd_prot_get_ioctl_trans_id(dhd_pub_t *dhdp);
4217*4882a593Smuzhiyun 
4218*4882a593Smuzhiyun #ifdef SET_PCIE_IRQ_CPU_CORE
4219*4882a593Smuzhiyun enum {
4220*4882a593Smuzhiyun 	DHD_AFFINITY_OFF = 0,
4221*4882a593Smuzhiyun 	DHD_AFFINITY_TPUT_150MBPS,
4222*4882a593Smuzhiyun 	DHD_AFFINITY_TPUT_300MBPS,
4223*4882a593Smuzhiyun 	DHD_AFFINITY_LAST
4224*4882a593Smuzhiyun };
4225*4882a593Smuzhiyun 
4226*4882a593Smuzhiyun extern void dhd_set_irq_cpucore(dhd_pub_t *dhdp, int affinity_cmd);
4227*4882a593Smuzhiyun #endif /* SET_PCIE_IRQ_CPU_CORE */
4228*4882a593Smuzhiyun #if defined(DHD_HANG_SEND_UP_TEST)
4229*4882a593Smuzhiyun extern void dhd_make_hang_with_reason(struct net_device *dev, const char *string_num);
4230*4882a593Smuzhiyun #endif /* DHD_HANG_SEND_UP_TEST */
4231*4882a593Smuzhiyun #ifdef BTLOG
4232*4882a593Smuzhiyun extern void dhd_rx_bt_log(dhd_pub_t *dhdp, void *pkt);
4233*4882a593Smuzhiyun #endif	/* BTLOG */
4234*4882a593Smuzhiyun 
4235*4882a593Smuzhiyun #ifdef DHD_RND_DEBUG
4236*4882a593Smuzhiyun int dhd_dump_rnd_info(dhd_pub_t *dhd, uint8 *rnd_buf, uint32 rnd_len);
4237*4882a593Smuzhiyun int dhd_get_rnd_info(dhd_pub_t *dhd);
4238*4882a593Smuzhiyun #endif /* DHD_RND_DEBUG */
4239*4882a593Smuzhiyun 
4240*4882a593Smuzhiyun #ifdef DHD_WAKE_STATUS
4241*4882a593Smuzhiyun wake_counts_t* dhd_get_wakecount(dhd_pub_t *dhdp);
4242*4882a593Smuzhiyun #endif /* DHD_WAKE_STATUS */
4243*4882a593Smuzhiyun extern int dhd_get_random_bytes(uint8 *buf, uint len);
4244*4882a593Smuzhiyun 
4245*4882a593Smuzhiyun #if defined(DHD_BLOB_EXISTENCE_CHECK)
4246*4882a593Smuzhiyun extern void dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path);
4247*4882a593Smuzhiyun #endif /* DHD_BLOB_EXISTENCE_CHECK */
4248*4882a593Smuzhiyun 
4249*4882a593Smuzhiyun int dhd_get_preserve_log_numbers(dhd_pub_t *dhd, uint32 *logset_mask);
4250*4882a593Smuzhiyun #ifdef DHD_LOG_DUMP
4251*4882a593Smuzhiyun void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type);
4252*4882a593Smuzhiyun void dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd);
4253*4882a593Smuzhiyun #endif
4254*4882a593Smuzhiyun 
4255*4882a593Smuzhiyun #ifdef DHD_LOG_DUMP
4256*4882a593Smuzhiyun int dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file,
4257*4882a593Smuzhiyun 		unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr, char *text_hdr,
4258*4882a593Smuzhiyun 		uint32 sec_type);
4259*4882a593Smuzhiyun int dhd_dump_debug_ring(dhd_pub_t *dhdp, void *ring_ptr, const void *user_buf,
4260*4882a593Smuzhiyun 		log_dump_section_hdr_t *sec_hdr, char *text_hdr, int buflen, uint32 sec_type);
4261*4882a593Smuzhiyun int dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp,
4262*4882a593Smuzhiyun 	const void *user_buf, unsigned long *f_pos);
4263*4882a593Smuzhiyun int dhd_log_dump_cookie(dhd_pub_t *dhdp, const void *user_buf);
4264*4882a593Smuzhiyun uint32 dhd_log_dump_cookie_len(dhd_pub_t *dhdp);
4265*4882a593Smuzhiyun int dhd_logdump_cookie_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size);
4266*4882a593Smuzhiyun void dhd_logdump_cookie_deinit(dhd_pub_t *dhdp);
4267*4882a593Smuzhiyun void dhd_logdump_cookie_save(dhd_pub_t *dhdp, char *cookie, char *type);
4268*4882a593Smuzhiyun int dhd_logdump_cookie_get(dhd_pub_t *dhdp, char *ret_cookie, uint32 buf_size);
4269*4882a593Smuzhiyun int dhd_logdump_cookie_count(dhd_pub_t *dhdp);
4270*4882a593Smuzhiyun int dhd_get_dld_log_dump(void *dev, dhd_pub_t *dhdp, const void *user_buf, void *fp,
4271*4882a593Smuzhiyun 	uint32 len, int type, void *pos);
4272*4882a593Smuzhiyun #if defined(BCMPCIE)
4273*4882a593Smuzhiyun int dhd_print_ext_trap_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
4274*4882a593Smuzhiyun 	void *fp, uint32 len, void *pos);
4275*4882a593Smuzhiyun uint32 dhd_get_ext_trap_len(void *ndev, dhd_pub_t *dhdp);
4276*4882a593Smuzhiyun #endif /* BCMPCIE */
4277*4882a593Smuzhiyun int dhd_print_dump_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
4278*4882a593Smuzhiyun 	void *fp, uint32 len, void *pos);
4279*4882a593Smuzhiyun int dhd_print_cookie_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
4280*4882a593Smuzhiyun 	void *fp, uint32 len, void *pos);
4281*4882a593Smuzhiyun int dhd_print_health_chk_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
4282*4882a593Smuzhiyun 	void *fp, uint32 len, void *pos);
4283*4882a593Smuzhiyun int dhd_print_time_str(const void *user_buf, void *fp, uint32 len, void *pos);
4284*4882a593Smuzhiyun #ifdef DHD_DUMP_PCIE_RINGS
4285*4882a593Smuzhiyun int dhd_print_flowring_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
4286*4882a593Smuzhiyun 	void *fp, uint32 len, void *pos);
4287*4882a593Smuzhiyun uint32 dhd_get_flowring_len(void *ndev, dhd_pub_t *dhdp);
4288*4882a593Smuzhiyun #endif /* DHD_DUMP_PCIE_RINGS */
4289*4882a593Smuzhiyun #ifdef DHD_STATUS_LOGGING
4290*4882a593Smuzhiyun extern int dhd_print_status_log_data(void *dev, dhd_pub_t *dhdp,
4291*4882a593Smuzhiyun 	const void *user_buf, void *fp, uint32 len, void *pos);
4292*4882a593Smuzhiyun extern uint32 dhd_get_status_log_len(void *ndev, dhd_pub_t *dhdp);
4293*4882a593Smuzhiyun #endif /* DHD_STATUS_LOGGING */
4294*4882a593Smuzhiyun int dhd_print_ecntrs_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
4295*4882a593Smuzhiyun 	void *fp, uint32 len, void *pos);
4296*4882a593Smuzhiyun int dhd_print_rtt_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
4297*4882a593Smuzhiyun 	void *fp, uint32 len, void *pos);
4298*4882a593Smuzhiyun int dhd_get_debug_dump_file_name(void *dev, dhd_pub_t *dhdp,
4299*4882a593Smuzhiyun 	char *dump_path, int size);
4300*4882a593Smuzhiyun uint32 dhd_get_time_str_len(void);
4301*4882a593Smuzhiyun uint32 dhd_get_health_chk_len(void *ndev, dhd_pub_t *dhdp);
4302*4882a593Smuzhiyun uint32 dhd_get_dhd_dump_len(void *ndev, dhd_pub_t *dhdp);
4303*4882a593Smuzhiyun uint32 dhd_get_cookie_log_len(void *ndev, dhd_pub_t *dhdp);
4304*4882a593Smuzhiyun uint32 dhd_get_ecntrs_len(void *ndev, dhd_pub_t *dhdp);
4305*4882a593Smuzhiyun uint32 dhd_get_rtt_len(void *ndev, dhd_pub_t *dhdp);
4306*4882a593Smuzhiyun uint32 dhd_get_dld_len(int log_type);
4307*4882a593Smuzhiyun void dhd_init_sec_hdr(log_dump_section_hdr_t *sec_hdr);
4308*4882a593Smuzhiyun extern char *dhd_log_dump_get_timestamp(void);
4309*4882a593Smuzhiyun bool dhd_log_dump_ecntr_enabled(void);
4310*4882a593Smuzhiyun bool dhd_log_dump_rtt_enabled(void);
4311*4882a593Smuzhiyun void dhd_nla_put_sssr_dump_len(void *ndev, uint32 *arr_len);
4312*4882a593Smuzhiyun int dhd_get_debug_dump(void *dev, const void *user_buf, uint32 len, int type);
4313*4882a593Smuzhiyun #ifdef DHD_SSSR_DUMP_BEFORE_SR
4314*4882a593Smuzhiyun int
4315*4882a593Smuzhiyun dhd_sssr_dump_d11_buf_before(void *dev, const void *user_buf, uint32 len, int core);
4316*4882a593Smuzhiyun int
4317*4882a593Smuzhiyun dhd_sssr_dump_dig_buf_before(void *dev, const void *user_buf, uint32 len);
4318*4882a593Smuzhiyun #endif /* DHD_SSSR_DUMP_BEFORE_SR */
4319*4882a593Smuzhiyun int
4320*4882a593Smuzhiyun dhd_sssr_dump_d11_buf_after(void *dev, const void *user_buf, uint32 len, int core);
4321*4882a593Smuzhiyun int
4322*4882a593Smuzhiyun dhd_sssr_dump_dig_buf_after(void *dev, const void *user_buf, uint32 len);
4323*4882a593Smuzhiyun #ifdef DHD_PKT_LOGGING
4324*4882a593Smuzhiyun extern int dhd_os_get_pktlog_dump(void *dev, const void *user_buf, uint32 len);
4325*4882a593Smuzhiyun extern uint32 dhd_os_get_pktlog_dump_size(struct net_device *dev);
4326*4882a593Smuzhiyun extern void dhd_os_get_pktlogdump_filename(struct net_device *dev, char *dump_path, int len);
4327*4882a593Smuzhiyun #endif /* DHD_PKT_LOGGING */
4328*4882a593Smuzhiyun 
4329*4882a593Smuzhiyun #ifdef DNGL_AXI_ERROR_LOGGING
4330*4882a593Smuzhiyun extern int dhd_os_get_axi_error_dump(void *dev, const void *user_buf, uint32 len);
4331*4882a593Smuzhiyun extern int dhd_os_get_axi_error_dump_size(struct net_device *dev);
4332*4882a593Smuzhiyun extern void dhd_os_get_axi_error_filename(struct net_device *dev, char *dump_path, int len);
4333*4882a593Smuzhiyun #endif /*  DNGL_AXI_ERROR_LOGGING */
4334*4882a593Smuzhiyun 
4335*4882a593Smuzhiyun #endif /* DHD_LOG_DUMP */
4336*4882a593Smuzhiyun 
4337*4882a593Smuzhiyun #define DHD_WORD_TO_LEN_SHIFT		(2u)       /* WORD to BYTES SHIFT */
4338*4882a593Smuzhiyun 
4339*4882a593Smuzhiyun #if defined(linux) || defined(LINUX) || defined(DHD_EFI)
4340*4882a593Smuzhiyun int dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf, uint32 buf_len, void *pos);
4341*4882a593Smuzhiyun #else
4342*4882a593Smuzhiyun static int dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf,
4343*4882a593Smuzhiyun 	uint32 buf_len, void *pos)
4344*4882a593Smuzhiyun { return 0; }
4345*4882a593Smuzhiyun #endif /* linux || LINUX */
4346*4882a593Smuzhiyun #if defined(linux) || defined(LINUX)
4347*4882a593Smuzhiyun #define DHD_PCIE_CONFIG_SAVE(bus)	pci_save_state(bus->dev)
4348*4882a593Smuzhiyun #define DHD_PCIE_CONFIG_RESTORE(bus)	pci_restore_state(bus->dev)
4349*4882a593Smuzhiyun #elif defined(DHD_EFI) || defined(NDIS)
4350*4882a593Smuzhiyun /* For EFI the PCIe config space saved during init
4351*4882a593Smuzhiyun * is the one that should always be restored, so save is a no-op
4352*4882a593Smuzhiyun */
4353*4882a593Smuzhiyun #define DHD_PCIE_CONFIG_SAVE(bus)
4354*4882a593Smuzhiyun #define DHD_PCIE_CONFIG_RESTORE(bus)	dhdpcie_config_restore(bus, TRUE)
4355*4882a593Smuzhiyun #else
4356*4882a593Smuzhiyun #define DHD_PCIE_CONFIG_SAVE(bus)	do { /* noop */ } while (0)
4357*4882a593Smuzhiyun #define DHD_PCIE_CONFIG_RESTORE(bus)	do { /* noop */ } while (0)
4358*4882a593Smuzhiyun #endif /* linux || LINUX */
4359*4882a593Smuzhiyun 
4360*4882a593Smuzhiyun typedef struct dhd_pkt_parse {
4361*4882a593Smuzhiyun 	uint32 proto;	/* Network layer protocol */
4362*4882a593Smuzhiyun 	uint32 t1;	/* n-tuple */
4363*4882a593Smuzhiyun 	uint32 t2;
4364*4882a593Smuzhiyun } dhd_pkt_parse_t;
4365*4882a593Smuzhiyun 
4366*4882a593Smuzhiyun /* ========= RING API functions : exposed to others ============= */
4367*4882a593Smuzhiyun #define DHD_RING_TYPE_FIXED		1
4368*4882a593Smuzhiyun #define DHD_RING_TYPE_SINGLE_IDX	2
4369*4882a593Smuzhiyun uint32 dhd_ring_get_hdr_size(void);
4370*4882a593Smuzhiyun void *dhd_ring_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size, uint32 elem_size,
4371*4882a593Smuzhiyun 	uint32 elem_cnt, uint32 type);
4372*4882a593Smuzhiyun void dhd_ring_deinit(dhd_pub_t *dhdp, void *_ring);
4373*4882a593Smuzhiyun void *dhd_ring_get_first(void *_ring);
4374*4882a593Smuzhiyun void dhd_ring_free_first(void *_ring);
4375*4882a593Smuzhiyun void dhd_ring_set_read_idx(void *_ring, uint32 read_idx);
4376*4882a593Smuzhiyun void dhd_ring_set_write_idx(void *_ring, uint32 write_idx);
4377*4882a593Smuzhiyun uint32 dhd_ring_get_read_idx(void *_ring);
4378*4882a593Smuzhiyun uint32 dhd_ring_get_write_idx(void *_ring);
4379*4882a593Smuzhiyun void *dhd_ring_get_last(void *_ring);
4380*4882a593Smuzhiyun void *dhd_ring_get_next(void *_ring, void *cur);
4381*4882a593Smuzhiyun void *dhd_ring_get_prev(void *_ring, void *cur);
4382*4882a593Smuzhiyun void *dhd_ring_get_empty(void *_ring);
4383*4882a593Smuzhiyun int dhd_ring_get_cur_size(void *_ring);
4384*4882a593Smuzhiyun void dhd_ring_lock(void *ring, void *first_ptr, void *last_ptr);
4385*4882a593Smuzhiyun void dhd_ring_lock_free(void *ring);
4386*4882a593Smuzhiyun void *dhd_ring_lock_get_first(void *_ring);
4387*4882a593Smuzhiyun void *dhd_ring_lock_get_last(void *_ring);
4388*4882a593Smuzhiyun int dhd_ring_lock_get_count(void *_ring);
4389*4882a593Smuzhiyun void dhd_ring_lock_free_first(void *ring);
4390*4882a593Smuzhiyun void dhd_ring_whole_lock(void *ring);
4391*4882a593Smuzhiyun void dhd_ring_whole_unlock(void *ring);
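
/*
 * Traversal sketch (illustrative only; not part of the driver API) for a ring
 * created with dhd_ring_init(). Elements are treated as opaque here, and it is
 * assumed that dhd_ring_get_first() returns NULL once the ring is empty.
 */
static INLINE uint32
dhd_example_ring_drain(void *ring)
{
	void *elem;
	uint32 drained = 0;

	/* Consume from the oldest element until the ring is empty */
	while ((elem = dhd_ring_get_first(ring)) != NULL) {
		/* ... process 'elem' here ... */
		dhd_ring_free_first(ring);
		drained++;
	}
	return drained;
}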
4392*4882a593Smuzhiyun 
4393*4882a593Smuzhiyun #ifdef GDB_PROXY
4394*4882a593Smuzhiyun /** Firmware loaded and GDB proxy may access memory and registers */
4395*4882a593Smuzhiyun #define DHD_GDB_PROXY_PROBE_ACCESS_ENABLED		0x00000001
4396*4882a593Smuzhiyun /** Firmware loaded, access to it is enabled but it is not running yet */
4397*4882a593Smuzhiyun #define DHD_GDB_PROXY_PROBE_FIRMWARE_NOT_RUNNING	0x00000002
4398*4882a593Smuzhiyun /** Firmware is running */
4399*4882a593Smuzhiyun #define DHD_GDB_PROXY_PROBE_FIRMWARE_RUNNING		0x00000004
4400*4882a593Smuzhiyun /** Firmware was started in bootloader mode */
4401*4882a593Smuzhiyun #define DHD_GDB_PROXY_PROBE_BOOTLOADER_MODE		0x00000008
4402*4882a593Smuzhiyun /** Host memory code offload present */
4403*4882a593Smuzhiyun #define DHD_GDB_PROXY_PROBE_HOSTMEM_CODE		0x00000010
4404*4882a593Smuzhiyun 
4405*4882a593Smuzhiyun /* Data structure, returned by "gdb_proxy_probe" iovar */
4406*4882a593Smuzhiyun typedef struct dhd_gdb_proxy_probe_data {
4407*4882a593Smuzhiyun 	uint32 data_len;		/* Length of data in structure */
4408*4882a593Smuzhiyun 	uint32 magic;			/* Must contain DHD_IOCTL_MAGIC */
4409*4882a593Smuzhiyun 	uint32 flags;			/* Set of DHD_GDB_PROXY_PROBE_... bits */
4410*4882a593Smuzhiyun 	uint32 last_id;			/* 0 or proxy ID last set */
4411*4882a593Smuzhiyun 	uint32 hostmem_code_win_base;	/* Hostmem code window start in ARM physical address space
4412*4882a593Smuzhiyun 					 */
4413*4882a593Smuzhiyun 	uint32 hostmem_code_win_length;	/* Hostmem code window length */
4414*4882a593Smuzhiyun } dhd_gdb_proxy_probe_data_t;
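
/*
 * Decode sketch (illustrative only; not part of the driver API): "live" below
 * means memory/register access is enabled and the firmware is running, per the
 * DHD_GDB_PROXY_PROBE_* flag bits defined above.
 */
static INLINE bool
dhd_example_gdb_proxy_fw_live(const dhd_gdb_proxy_probe_data_t *probe)
{
	return ((probe->flags & DHD_GDB_PROXY_PROBE_ACCESS_ENABLED) != 0) &&
		((probe->flags & DHD_GDB_PROXY_PROBE_FIRMWARE_RUNNING) != 0);
}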
4415*4882a593Smuzhiyun #endif /* GDB_PROXY */
4416*4882a593Smuzhiyun 
4417*4882a593Smuzhiyun #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
4418*4882a593Smuzhiyun void dhd_clear_awdl_stats(dhd_pub_t *dhd);
4419*4882a593Smuzhiyun #endif /* DHD_AWDL && AWDL_SLOT_STATS */
4420*4882a593Smuzhiyun #ifdef DHD_EFI
4421*4882a593Smuzhiyun extern void dhd_insert_random_mac_addr(dhd_pub_t *dhd, char *nvram_mem, uint *len);
4422*4882a593Smuzhiyun #endif /* DHD_EFI */
4423*4882a593Smuzhiyun 
4424*4882a593Smuzhiyun #ifdef PKT_FILTER_SUPPORT
4425*4882a593Smuzhiyun extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg);
4426*4882a593Smuzhiyun extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
4427*4882a593Smuzhiyun extern void dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id);
4428*4882a593Smuzhiyun #endif
4429*4882a593Smuzhiyun 
4430*4882a593Smuzhiyun #ifdef DHD_DUMP_PCIE_RINGS
4431*4882a593Smuzhiyun extern int dhd_d2h_h2d_ring_dump(dhd_pub_t *dhd, void *file, const void *user_buf,
4432*4882a593Smuzhiyun 	unsigned long *file_posn, bool file_write);
4433*4882a593Smuzhiyun #endif /* DHD_DUMP_PCIE_RINGS */
4434*4882a593Smuzhiyun 
4435*4882a593Smuzhiyun #ifdef EWP_EDL
4436*4882a593Smuzhiyun #define DHD_EDL_RING_SIZE (D2HRING_EDL_MAX_ITEM * D2HRING_EDL_ITEMSIZE)
4437*4882a593Smuzhiyun int dhd_event_logtrace_process_edl(dhd_pub_t *dhdp, uint8 *data,
4438*4882a593Smuzhiyun 		void *evt_decode_data);
4439*4882a593Smuzhiyun int dhd_edl_mem_init(dhd_pub_t *dhd);
4440*4882a593Smuzhiyun void dhd_edl_mem_deinit(dhd_pub_t *dhd);
4441*4882a593Smuzhiyun void dhd_prot_edl_ring_tcm_rd_update(dhd_pub_t *dhd);
4442*4882a593Smuzhiyun #define DHD_EDL_MEM_INIT(dhdp) dhd_edl_mem_init(dhdp)
4443*4882a593Smuzhiyun #define DHD_EDL_MEM_DEINIT(dhdp) dhd_edl_mem_deinit(dhdp)
4444*4882a593Smuzhiyun #define DHD_EDL_RING_TCM_RD_UPDATE(dhdp) \
4445*4882a593Smuzhiyun 	dhd_prot_edl_ring_tcm_rd_update(dhdp)
4446*4882a593Smuzhiyun #else
4447*4882a593Smuzhiyun #define DHD_EDL_MEM_INIT(dhdp) do { /* noop */ } while (0)
4448*4882a593Smuzhiyun #define DHD_EDL_MEM_DEINIT(dhdp) do { /* noop */ } while (0)
4449*4882a593Smuzhiyun #define DHD_EDL_RING_TCM_RD_UPDATE(dhdp) do { /* noop */ } while (0)
4450*4882a593Smuzhiyun #endif /* EWP_EDL */
4451*4882a593Smuzhiyun 
4452*4882a593Smuzhiyun #ifdef BIGDATA_SOFTAP
4453*4882a593Smuzhiyun void dhd_schedule_gather_ap_stadata(void *bcm_cfg, void *ndev, const wl_event_msg_t *e);
4454*4882a593Smuzhiyun #endif /* BIGDATA_SOFTAP */
4455*4882a593Smuzhiyun 
4456*4882a593Smuzhiyun #ifdef DHD_PKTTS
4457*4882a593Smuzhiyun int dhd_get_pktts_enab(dhd_pub_t *dhdp);
4458*4882a593Smuzhiyun int dhd_set_pktts_enab(dhd_pub_t *dhdp, bool val);
4459*4882a593Smuzhiyun 
4460*4882a593Smuzhiyun int dhd_get_pktts_flow(dhd_pub_t *dhdp, void *args, int len);
4461*4882a593Smuzhiyun int dhd_set_pktts_flow(dhd_pub_t *dhdp, void *params, int plen);
4462*4882a593Smuzhiyun pktts_flow_t *dhd_match_pktts_flow(dhd_pub_t *dhdp, uint32 checksum,
4463*4882a593Smuzhiyun 	uint32 *idx, uint32 *num_config);
4464*4882a593Smuzhiyun #endif /* DHD_PKTTS */
4465*4882a593Smuzhiyun 
4466*4882a593Smuzhiyun #if defined(DHD_H2D_LOG_TIME_SYNC)
4467*4882a593Smuzhiyun void dhd_h2d_log_time_sync_deferred_wq_schedule(dhd_pub_t *dhdp);
4468*4882a593Smuzhiyun void dhd_h2d_log_time_sync(dhd_pub_t *dhdp);
4469*4882a593Smuzhiyun #endif /* DHD_H2D_LOG_TIME_SYNC */
4470*4882a593Smuzhiyun extern void dhd_cleanup_if(struct net_device *net);
4471*4882a593Smuzhiyun 
4472*4882a593Smuzhiyun void dhd_schedule_logtrace(void *dhd_info);
4473*4882a593Smuzhiyun int dhd_print_fw_ver_from_file(dhd_pub_t *dhdp, char *fwpath);
4474*4882a593Smuzhiyun 
4475*4882a593Smuzhiyun #if defined(LINUX) || defined(linux)
4476*4882a593Smuzhiyun /* Configuration of ecounters. APIs to start/stop. Currently supported only for Linux. */
4477*4882a593Smuzhiyun extern int dhd_ecounter_configure(dhd_pub_t *dhd, bool enable);
4478*4882a593Smuzhiyun extern int dhd_start_ecounters(dhd_pub_t *dhd);
4479*4882a593Smuzhiyun extern int dhd_stop_ecounters(dhd_pub_t *dhd);
4480*4882a593Smuzhiyun extern int dhd_start_event_ecounters(dhd_pub_t *dhd);
4481*4882a593Smuzhiyun extern int dhd_stop_event_ecounters(dhd_pub_t *dhd);
4482*4882a593Smuzhiyun #endif /* LINUX || linux */
4483*4882a593Smuzhiyun 
4484*4882a593Smuzhiyun #define DHD_DUMP_TYPE_NAME_SIZE		32
4485*4882a593Smuzhiyun #define DHD_DUMP_FILE_PATH_SIZE		256
4486*4882a593Smuzhiyun #define DHD_DUMP_FILE_COUNT_MAX		5
4487*4882a593Smuzhiyun #define DHD_DUMP_TYPE_COUNT_MAX		10
4488*4882a593Smuzhiyun 
4489*4882a593Smuzhiyun #ifdef DHD_DUMP_MNGR
4490*4882a593Smuzhiyun typedef struct _DFM_elem {
4491*4882a593Smuzhiyun 	char type_name[DHD_DUMP_TYPE_NAME_SIZE];
4492*4882a593Smuzhiyun 	char file_path[DHD_DUMP_FILE_COUNT_MAX][DHD_DUMP_FILE_PATH_SIZE];
4493*4882a593Smuzhiyun 	int file_idx;
4494*4882a593Smuzhiyun } DFM_elem_t;
4495*4882a593Smuzhiyun 
4496*4882a593Smuzhiyun typedef struct _dhd_dump_file_manage {
4497*4882a593Smuzhiyun 	DFM_elem_t elems[DHD_DUMP_TYPE_COUNT_MAX];
4498*4882a593Smuzhiyun } dhd_dump_file_manage_t;
4499*4882a593Smuzhiyun 
4500*4882a593Smuzhiyun extern void dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname);
4501*4882a593Smuzhiyun #endif /* DHD_DUMP_MNGR */
4502*4882a593Smuzhiyun 
4503*4882a593Smuzhiyun #define HD_PREFIX_SIZE  2   /* hexadecimal prefix size */
4504*4882a593Smuzhiyun #define HD_BYTE_SIZE    2   /* hexadecimal byte size */
4505*4882a593Smuzhiyun 
4506*4882a593Smuzhiyun #ifdef DHD_HP2P
4507*4882a593Smuzhiyun extern unsigned long dhd_os_hp2plock(dhd_pub_t *pub);
4508*4882a593Smuzhiyun extern void dhd_os_hp2punlock(dhd_pub_t *pub, unsigned long flags);
4509*4882a593Smuzhiyun #endif /* DHD_HP2P */
4510*4882a593Smuzhiyun 
4511*4882a593Smuzhiyun #ifdef DNGL_AXI_ERROR_LOGGING
4512*4882a593Smuzhiyun extern void dhd_axi_error(dhd_pub_t *dhd);
4513*4882a593Smuzhiyun #ifdef DHD_USE_WQ_FOR_DNGL_AXI_ERROR
4514*4882a593Smuzhiyun extern void dhd_axi_error_dispatch(dhd_pub_t *dhdp);
4515*4882a593Smuzhiyun #endif /* DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
4516*4882a593Smuzhiyun #endif /* DNGL_AXI_ERROR_LOGGING */
4517*4882a593Smuzhiyun 
4518*4882a593Smuzhiyun #ifdef DHD_STATUS_LOGGING
4519*4882a593Smuzhiyun #include <dhd_statlog.h>
4520*4882a593Smuzhiyun #else
4521*4882a593Smuzhiyun #define ST(x)		0
4522*4882a593Smuzhiyun #define STDIR(x)	0
4523*4882a593Smuzhiyun #define DHD_STATLOG_CTRL(dhdp, stat, ifidx, reason) \
4524*4882a593Smuzhiyun 	do { /* noop */ } while (0)
4525*4882a593Smuzhiyun #define DHD_STATLOG_DATA(dhdp, stat, ifidx, dir, cond) \
4526*4882a593Smuzhiyun 	do { BCM_REFERENCE(cond); } while (0)
4527*4882a593Smuzhiyun #define DHD_STATLOG_DATA_RSN(dhdp, stat, ifidx, dir, reason) \
4528*4882a593Smuzhiyun 	do { /* noop */ } while (0)
4529*4882a593Smuzhiyun #endif /* DHD_STATUS_LOGGING */
4530*4882a593Smuzhiyun 
4531*4882a593Smuzhiyun #ifdef SUPPORT_SET_TID
4532*4882a593Smuzhiyun enum dhd_set_tid_mode {
4533*4882a593Smuzhiyun 	/* Disable changing TID */
4534*4882a593Smuzhiyun 	SET_TID_OFF = 0,
4535*4882a593Smuzhiyun 	/* Change TID for all UDP frames */
4536*4882a593Smuzhiyun 	SET_TID_ALL_UDP,
4537*4882a593Smuzhiyun 	/* Change TID for UDP frames based on UID */
4538*4882a593Smuzhiyun 	SET_TID_BASED_ON_UID
4539*4882a593Smuzhiyun };
4540*4882a593Smuzhiyun #if defined(linux) || defined(LINUX)
4541*4882a593Smuzhiyun extern void dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt);
4542*4882a593Smuzhiyun #else
4543*4882a593Smuzhiyun static INLINE void dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt) { return; }
4544*4882a593Smuzhiyun #endif /* linux || LINUX */
4545*4882a593Smuzhiyun #endif /* SUPPORT_SET_TID */
4546*4882a593Smuzhiyun 
4547*4882a593Smuzhiyun #ifdef CONFIG_SILENT_ROAM
4548*4882a593Smuzhiyun extern int dhd_sroam_set_mon(dhd_pub_t *dhd, bool set);
4549*4882a593Smuzhiyun typedef wlc_sroam_info_v1_t wlc_sroam_info_t;
4550*4882a593Smuzhiyun #endif /* CONFIG_SILENT_ROAM */
4551*4882a593Smuzhiyun 
4552*4882a593Smuzhiyun #ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL
4553*4882a593Smuzhiyun #define FILE_NAME_HAL_TAG	""
4554*4882a593Smuzhiyun #else
4555*4882a593Smuzhiyun #define FILE_NAME_HAL_TAG	"_hal" /* The tag name concatenated by HAL */
4556*4882a593Smuzhiyun #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
4557*4882a593Smuzhiyun 
4558*4882a593Smuzhiyun /* Given a number 'n', returns the smallest power of 2 that is >= n (returns 1 for n == 0) */
4559*4882a593Smuzhiyun static inline uint32 next_larger_power2(uint32 num)
4560*4882a593Smuzhiyun {
4561*4882a593Smuzhiyun 	if (num) {
4562*4882a593Smuzhiyun 		num--;
4563*4882a593Smuzhiyun 		num |= (num >> 1);
4564*4882a593Smuzhiyun 		num |= (num >> 2);
4565*4882a593Smuzhiyun 		num |= (num >> 4);
4566*4882a593Smuzhiyun 		num |= (num >> 8);
4567*4882a593Smuzhiyun 		num |= (num >> 16);
4568*4882a593Smuzhiyun 	}
4569*4882a593Smuzhiyun 	return (num + 1);
4570*4882a593Smuzhiyun }
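
/*
 * Worked examples for next_larger_power2():
 *	next_larger_power2(0)    == 1
 *	next_larger_power2(1)    == 1
 *	next_larger_power2(1500) == 2048
 *	next_larger_power2(2048) == 2048 (values already a power of 2 are returned unchanged)
 */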
4571*4882a593Smuzhiyun 
4572*4882a593Smuzhiyun extern struct dhd_if * dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
4573*4882a593Smuzhiyun uint8 dhd_d11_slices_num_get(dhd_pub_t *dhdp);
4574*4882a593Smuzhiyun #ifdef WL_AUTO_QOS
4575*4882a593Smuzhiyun extern void dhd_wl_sock_qos_set_status(dhd_pub_t *dhdp, unsigned long on_off);
4576*4882a593Smuzhiyun #endif /* WL_AUTO_QOS */
4577*4882a593Smuzhiyun 
4578*4882a593Smuzhiyun void *dhd_get_roam_evt(dhd_pub_t *dhdp);
4579*4882a593Smuzhiyun #if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
4580*4882a593Smuzhiyun extern int dhd_control_he_enab(dhd_pub_t * dhd, uint8 he_enab);
4581*4882a593Smuzhiyun extern uint8 control_he_enab;
4582*4882a593Smuzhiyun #endif /* DISABLE_HE_ENAB  || CUSTOM_CONTROL_HE_ENAB */
4583*4882a593Smuzhiyun 
4584*4882a593Smuzhiyun #ifdef DHD_SDTC_ETB_DUMP
4585*4882a593Smuzhiyun 
4586*4882a593Smuzhiyun #define DHD_SDTC_ETB_MEMPOOL_SIZE (33 * 1024)
4587*4882a593Smuzhiyun extern int dhd_sdtc_etb_mempool_init(dhd_pub_t *dhd);
4588*4882a593Smuzhiyun extern void dhd_sdtc_etb_mempool_deinit(dhd_pub_t *dhd);
4589*4882a593Smuzhiyun extern void dhd_sdtc_etb_init(dhd_pub_t *dhd);
4590*4882a593Smuzhiyun extern void dhd_sdtc_etb_deinit(dhd_pub_t *dhd);
4591*4882a593Smuzhiyun extern void dhd_sdtc_etb_dump(dhd_pub_t *dhd);
4592*4882a593Smuzhiyun #endif /* DHD_SDTC_ETB_DUMP */
4593*4882a593Smuzhiyun 
4594*4882a593Smuzhiyun #ifdef DHD_TX_PROFILE
4595*4882a593Smuzhiyun int dhd_tx_profile_attach(dhd_pub_t *dhdp);
4596*4882a593Smuzhiyun int dhd_tx_profile_detach(dhd_pub_t *dhdp);
4597*4882a593Smuzhiyun #endif /* defined (DHD_TX_PROFILE) */
4598*4882a593Smuzhiyun #if defined(DHD_LB_RXP)
4599*4882a593Smuzhiyun uint32 dhd_lb_rxp_process_qlen(dhd_pub_t *dhdp);
4600*4882a593Smuzhiyun /*
4601*4882a593Smuzhiyun  * To avoid OOM, flow control kicks in when the packet bytes queued in process_queue
4602*4882a593Smuzhiyun  * cross LB_RXP_STOP_THR * rcpl ring size * 1500 (pkt size), and is released
4603*4882a593Smuzhiyun  * when the backlog drops below LB_RXP_STRT_THR * rcpl ring size * 1500 (pkt size)
4604*4882a593Smuzhiyun  */
4605*4882a593Smuzhiyun #define LB_RXP_STOP_THR 200	/* 200 * 1024 * 1500 = 300MB */
4606*4882a593Smuzhiyun #define LB_RXP_STRT_THR 199	/* 199 * 1024 * 1500 = 291MB */
4607*4882a593Smuzhiyun #endif /* DHD_LB_RXP */
4608*4882a593Smuzhiyun #ifdef DHD_SUPPORT_HDM
4609*4882a593Smuzhiyun extern bool hdm_trigger_init;
4610*4882a593Smuzhiyun extern int dhd_module_init_hdm(void);
4611*4882a593Smuzhiyun extern void dhd_hdm_wlan_sysfs_init(void);
4612*4882a593Smuzhiyun extern void dhd_hdm_wlan_sysfs_deinit(struct work_struct *);
4613*4882a593Smuzhiyun #define SYSFS_DEINIT_MS 10
4614*4882a593Smuzhiyun #endif /* DHD_SUPPORT_HDM */
4615*4882a593Smuzhiyun 
4616*4882a593Smuzhiyun #if defined(linux) || defined(LINUX)
4617*4882a593Smuzhiyun #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) && defined(DHD_TCP_LIMIT_OUTPUT)
4618*4882a593Smuzhiyun void dhd_ctrl_tcp_limit_output_bytes(int level);
4619*4882a593Smuzhiyun #endif /* LINUX_VERSION_CODE >= 4.19.0 && DHD_TCP_LIMIT_OUTPUT */
4620*4882a593Smuzhiyun #endif /* linux || LINUX */
4621*4882a593Smuzhiyun 
4622*4882a593Smuzhiyun #if defined(__linux__)
4623*4882a593Smuzhiyun extern void dhd_schedule_delayed_dpc_on_dpc_cpu(dhd_pub_t *dhdp, ulong delay);
4624*4882a593Smuzhiyun extern void dhd_handle_pktdata(dhd_pub_t *dhdp, int ifidx, void *pkt, uint8 *pktdata,
4625*4882a593Smuzhiyun 	uint32 pktid, uint32 pktlen, uint16 *pktfate, uint8 *dhd_udr, bool tx, int pkt_wake,
4626*4882a593Smuzhiyun 	bool pkt_log);
4627*4882a593Smuzhiyun #else
4628*4882a593Smuzhiyun static INLINE void dhd_schedule_delayed_dpc_on_dpc_cpu(dhd_pub_t *dhdp, ulong delay)
4629*4882a593Smuzhiyun 	{ return; }
4630*4882a593Smuzhiyun static INLINE void dhd_handle_pktdata(dhd_pub_t *dhdp, int ifidx, void *pkt, uint8 *pktdata,
4631*4882a593Smuzhiyun 	uint32 pktid, uint32 pktlen, uint16 *pktfate, uint8 *dhd_udr, bool tx, int pkt_wake,
4632*4882a593Smuzhiyun 	bool pkt_log) { return; }
4633*4882a593Smuzhiyun #endif /* __linux__ */
4634*4882a593Smuzhiyun 
4635*4882a593Smuzhiyun #if defined(BCMPCIE) && defined(__linux__)
4636*4882a593Smuzhiyun extern int dhd_check_shinfo_nrfrags(dhd_pub_t *dhdp, void *pktbuf, dmaaddr_t *pa, uint32 pktid);
4637*4882a593Smuzhiyun #else
4638*4882a593Smuzhiyun static INLINE int dhd_check_shinfo_nrfrags(dhd_pub_t *dhdp, void *pktbuf, dmaaddr_t *pa,
4639*4882a593Smuzhiyun 	uint32 pktid) { return BCME_OK; }
4640*4882a593Smuzhiyun #endif /* BCMPCIE && __linux__ */
4641*4882a593Smuzhiyun 
4642*4882a593Smuzhiyun #ifdef HOST_SFH_LLC
4643*4882a593Smuzhiyun int dhd_ether_to_8023_hdr(osl_t *osh, struct ether_header *eh, void *p);
4644*4882a593Smuzhiyun int dhd_8023_llc_to_ether_hdr(osl_t *osh, struct ether_header *eh8023, void *p);
4645*4882a593Smuzhiyun #endif
4646*4882a593Smuzhiyun int dhd_schedule_socram_dump(dhd_pub_t *dhdp);
4647*4882a593Smuzhiyun 
4648*4882a593Smuzhiyun #ifdef DHD_AWDL
4649*4882a593Smuzhiyun int dhd_ether_to_awdl_llc_hdr(struct dhd_pub *dhd, struct ether_header *eh, void *p);
4650*4882a593Smuzhiyun int dhd_awdl_llc_to_eth_hdr(struct dhd_pub *dhd, struct ether_header *eh, void *p);
4651*4882a593Smuzhiyun #endif /* DHD_AWDL */
4652*4882a593Smuzhiyun 
4653*4882a593Smuzhiyun #ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
4654*4882a593Smuzhiyun #ifndef DEBUGABILITY
4655*4882a593Smuzhiyun #error "DHD_DEBUGABILITY_LOG_DUMP_RING without DEBUGABILITY"
4656*4882a593Smuzhiyun #endif /* DEBUGABILITY */
4657*4882a593Smuzhiyun #endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
4658*4882a593Smuzhiyun 
4659*4882a593Smuzhiyun #if defined(__linux__)
4660*4882a593Smuzhiyun #ifdef DHD_SUPPORT_VFS_CALL
4661*4882a593Smuzhiyun static INLINE struct file *dhd_filp_open(const char *filename, int flags, int mode)
4662*4882a593Smuzhiyun {
4663*4882a593Smuzhiyun 	return filp_open(filename, flags, mode);
4664*4882a593Smuzhiyun }
4665*4882a593Smuzhiyun 
4666*4882a593Smuzhiyun static INLINE int dhd_filp_close(void *image, void *id)
4667*4882a593Smuzhiyun {
4668*4882a593Smuzhiyun 	return filp_close((struct file *)image, id);
4669*4882a593Smuzhiyun }
4670*4882a593Smuzhiyun 
4671*4882a593Smuzhiyun static INLINE int dhd_i_size_read(const struct inode *inode)
4672*4882a593Smuzhiyun {
4673*4882a593Smuzhiyun 	return i_size_read(inode);
4674*4882a593Smuzhiyun }
4675*4882a593Smuzhiyun 
4676*4882a593Smuzhiyun static INLINE int dhd_kernel_read_compat(struct file *fp, loff_t pos, void *buf, size_t count)
4677*4882a593Smuzhiyun {
4678*4882a593Smuzhiyun 	return kernel_read_compat(fp, pos, buf, count);
4679*4882a593Smuzhiyun }
4680*4882a593Smuzhiyun 
4681*4882a593Smuzhiyun static INLINE int dhd_vfs_read(struct file *filep, char *buf, size_t size, loff_t *pos)
4682*4882a593Smuzhiyun {
4683*4882a593Smuzhiyun 	return vfs_read(filep, buf, size, pos);
4684*4882a593Smuzhiyun }
4685*4882a593Smuzhiyun 
4686*4882a593Smuzhiyun static INLINE int dhd_vfs_write(struct file *filep, char *buf, size_t size, loff_t *pos)
4687*4882a593Smuzhiyun {
4688*4882a593Smuzhiyun 	return vfs_write(filep, buf, size, pos);
4689*4882a593Smuzhiyun }
4690*4882a593Smuzhiyun 
4691*4882a593Smuzhiyun static INLINE int dhd_vfs_fsync(struct file *filep, int datasync)
4692*4882a593Smuzhiyun {
4693*4882a593Smuzhiyun #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
4694*4882a593Smuzhiyun 	return vfs_fsync(filep, datasync);
4695*4882a593Smuzhiyun #else
4696*4882a593Smuzhiyun 	return vfs_fsync(filep, filep->f_path.dentry, 0);
4697*4882a593Smuzhiyun #endif
4698*4882a593Smuzhiyun }
4699*4882a593Smuzhiyun 
4700*4882a593Smuzhiyun static INLINE int dhd_vfs_stat(char *buf, struct kstat *stat)
4701*4882a593Smuzhiyun {
4702*4882a593Smuzhiyun 	return vfs_stat(buf, stat);
4703*4882a593Smuzhiyun }
4704*4882a593Smuzhiyun 
4705*4882a593Smuzhiyun static INLINE int dhd_kern_path(char *name, int flags, struct path *file_path)
4706*4882a593Smuzhiyun {
4707*4882a593Smuzhiyun 	return kern_path(name, flags, file_path);
4708*4882a593Smuzhiyun }
4709*4882a593Smuzhiyun 
4710*4882a593Smuzhiyun #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
4711*4882a593Smuzhiyun #define DHD_VFS_INODE(dir) (dir->d_inode)
4712*4882a593Smuzhiyun #else
4713*4882a593Smuzhiyun #define DHD_VFS_INODE(dir) d_inode(dir)
4714*4882a593Smuzhiyun #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) */
4715*4882a593Smuzhiyun 
4716*4882a593Smuzhiyun #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
4717*4882a593Smuzhiyun #define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b)
4718*4882a593Smuzhiyun #else
4719*4882a593Smuzhiyun #define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b, c)
4720*4882a593Smuzhiyun #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */
4721*4882a593Smuzhiyun 
4722*4882a593Smuzhiyun #else
4723*4882a593Smuzhiyun #define DHD_VFS_UNLINK(dir, b, c) 0
4724*4882a593Smuzhiyun 
4725*4882a593Smuzhiyun static INLINE struct file *dhd_filp_open(const char *filename, int flags, int mode)
4726*4882a593Smuzhiyun 	{ printf("%s: DHD_SUPPORT_VFS_CALL not defined\n", __FUNCTION__); return NULL; }
4727*4882a593Smuzhiyun static INLINE int dhd_filp_close(void *image, void *id)
4728*4882a593Smuzhiyun 	{ return 0; }
4729*4882a593Smuzhiyun static INLINE int dhd_i_size_read(const struct inode *inode)
4730*4882a593Smuzhiyun 	{ return 0; }
4731*4882a593Smuzhiyun static INLINE int dhd_kernel_read_compat(struct file *fp, loff_t pos, void *buf, size_t count)
4732*4882a593Smuzhiyun 	{ return 0; }
4733*4882a593Smuzhiyun static INLINE int dhd_vfs_read(struct file *filep, char *buf, size_t size, loff_t *pos)
4734*4882a593Smuzhiyun 	{ return 0; }
4735*4882a593Smuzhiyun static INLINE int dhd_vfs_write(struct file *filep, char *buf, size_t size, loff_t *pos)
4736*4882a593Smuzhiyun 	{ return 0; }
4737*4882a593Smuzhiyun static INLINE int dhd_vfs_fsync(struct file *filep, int datasync)
4738*4882a593Smuzhiyun 	{ return 0; }
4739*4882a593Smuzhiyun static INLINE int dhd_vfs_stat(char *buf, struct kstat *stat)
4740*4882a593Smuzhiyun 	{ return 0; }
4741*4882a593Smuzhiyun static INLINE int dhd_kern_path(char *name, int flags, struct path *file_path)
4742*4882a593Smuzhiyun 	{ return 0; }
4743*4882a593Smuzhiyun #endif /* DHD_SUPPORT_VFS_CALL */
4744*4882a593Smuzhiyun #endif /* __linux__ */
4745*4882a593Smuzhiyun 
4746*4882a593Smuzhiyun #ifdef WL_MONITOR
4747*4882a593Smuzhiyun void dhd_set_monitor(dhd_pub_t *pub, int ifidx, int val);
4748*4882a593Smuzhiyun #endif /* WL_MONITOR */
4749*4882a593Smuzhiyun #endif /* _dhd_h_ */
4750