xref: /OK3568_Linux_fs/external/rkwifibt/drivers/infineon/dhd_pcie.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Linux DHD Bus Module for PCIE
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Copyright (C) 1999-2017, Broadcom Corporation
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  *      Unless you and Broadcom execute a separate written software license
9*4882a593Smuzhiyun  * agreement governing use of this software, this software is licensed to you
10*4882a593Smuzhiyun  * under the terms of the GNU General Public License version 2 (the "GPL"),
11*4882a593Smuzhiyun  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12*4882a593Smuzhiyun  * following added to such license:
13*4882a593Smuzhiyun  *
14*4882a593Smuzhiyun  *      As a special exception, the copyright holders of this software give you
15*4882a593Smuzhiyun  * permission to link this software with independent modules, and to copy and
16*4882a593Smuzhiyun  * distribute the resulting executable under terms of your choice, provided that
17*4882a593Smuzhiyun  * you also meet, for each linked independent module, the terms and conditions of
18*4882a593Smuzhiyun  * the license of that module.  An independent module is a module which is not
19*4882a593Smuzhiyun  * derived from this software.  The special exception does not apply to any
20*4882a593Smuzhiyun  * modifications of the software.
21*4882a593Smuzhiyun  *
22*4882a593Smuzhiyun  *      Notwithstanding the above, under no circumstances may you combine this
23*4882a593Smuzhiyun  * software in any way with any other Broadcom software provided under a license
24*4882a593Smuzhiyun  * other than the GPL, without Broadcom's express prior written consent.
25*4882a593Smuzhiyun  *
26*4882a593Smuzhiyun  *
27*4882a593Smuzhiyun  * <<Broadcom-WL-IPTag/Open:>>
28*4882a593Smuzhiyun  *
29*4882a593Smuzhiyun  * $Id: dhd_pcie.h 698652 2017-05-10 10:39:24Z $
30*4882a593Smuzhiyun  */
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun #ifndef dhd_pcie_h
33*4882a593Smuzhiyun #define dhd_pcie_h
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun #include <bcmpcie.h>
36*4882a593Smuzhiyun #include <hnd_cons.h>
37*4882a593Smuzhiyun #ifdef SUPPORT_LINKDOWN_RECOVERY
38*4882a593Smuzhiyun #ifdef CONFIG_ARCH_MSM
39*4882a593Smuzhiyun #ifdef CONFIG_PCI_MSM
40*4882a593Smuzhiyun #include <linux/msm_pcie.h>
41*4882a593Smuzhiyun #else
42*4882a593Smuzhiyun #include <mach/msm_pcie.h>
43*4882a593Smuzhiyun #endif /* CONFIG_PCI_MSM */
44*4882a593Smuzhiyun #endif /* CONFIG_ARCH_MSM */
45*4882a593Smuzhiyun #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
46*4882a593Smuzhiyun #if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
47*4882a593Smuzhiyun 	defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820)
48*4882a593Smuzhiyun #include <linux/exynos-pci-noti.h>
49*4882a593Smuzhiyun extern int exynos_pcie_register_event(struct exynos_pcie_register_event *reg);
50*4882a593Smuzhiyun extern int exynos_pcie_deregister_event(struct exynos_pcie_register_event *reg);
#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 ||
	* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820
	*/
54*4882a593Smuzhiyun #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
55*4882a593Smuzhiyun #endif /* SUPPORT_LINKDOWN_RECOVERY */
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun #ifdef DHD_PCIE_RUNTIMEPM
58*4882a593Smuzhiyun #include <linux/mutex.h>
59*4882a593Smuzhiyun #include <linux/wait.h>
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun #define DEFAULT_DHD_RUNTIME_MS 100
62*4882a593Smuzhiyun #ifndef CUSTOM_DHD_RUNTIME_MS
63*4882a593Smuzhiyun #define CUSTOM_DHD_RUNTIME_MS DEFAULT_DHD_RUNTIME_MS
64*4882a593Smuzhiyun #endif /* CUSTOM_DHD_RUNTIME_MS */
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun #ifndef MAX_IDLE_COUNT
67*4882a593Smuzhiyun #define MAX_IDLE_COUNT 16
68*4882a593Smuzhiyun #endif /* MAX_IDLE_COUNT */
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun #ifndef MAX_RESUME_WAIT
71*4882a593Smuzhiyun #define MAX_RESUME_WAIT 100
72*4882a593Smuzhiyun #endif /* MAX_RESUME_WAIT */
73*4882a593Smuzhiyun #endif /* DHD_PCIE_RUNTIMEPM */
74*4882a593Smuzhiyun 
/* defines */
/* Host/dongle PCIe shared-structure revision this driver implements */
#define PCIE_SHARED_VERSION		PCIE_SHARED_VERSION_7

#define PCMSGBUF_HDRLEN 0	/* msgbuf protocol adds no bus-level header */
#define DONGLE_REG_MAP_SIZE (32 * 1024)		/* size of the dongle register mapping */
#define DONGLE_TCM_MAP_SIZE (4096 * 1024)	/* size of the dongle TCM (RAM) mapping */
#define DONGLE_MIN_MEMSIZE (128 *1024)		/* smallest dongle RAM size accepted */
#ifdef DHD_DEBUG
#define DHD_PCIE_SUCCESS 0
#define DHD_PCIE_FAILURE 1
#endif /* DHD_DEBUG */
/* SOCRAM remap: an address is in the remap region when it lies between
 * the original RAM size and the (larger) remapped RAM size.
 */
#define	REMAP_ENAB(bus)			((bus)->remap)
#define	REMAP_ISADDR(bus, a)		(((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))
88*4882a593Smuzhiyun 
89*4882a593Smuzhiyun #ifdef SUPPORT_LINKDOWN_RECOVERY
90*4882a593Smuzhiyun #ifdef CONFIG_ARCH_MSM
91*4882a593Smuzhiyun #define struct_pcie_notify		struct msm_pcie_notify
92*4882a593Smuzhiyun #define struct_pcie_register_event	struct msm_pcie_register_event
93*4882a593Smuzhiyun #endif /* CONFIG_ARCH_MSM */
94*4882a593Smuzhiyun #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
95*4882a593Smuzhiyun #if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
96*4882a593Smuzhiyun 	defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820)
97*4882a593Smuzhiyun #define struct_pcie_notify		struct exynos_pcie_notify
98*4882a593Smuzhiyun #define struct_pcie_register_event	struct exynos_pcie_register_event
#endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 ||
	* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820
	*/
102*4882a593Smuzhiyun #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
103*4882a593Smuzhiyun #endif /* SUPPORT_LINKDOWN_RECOVERY */
104*4882a593Smuzhiyun 
105*4882a593Smuzhiyun #define MAX_DHD_TX_FLOWS	320
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun /* user defined data structures */
108*4882a593Smuzhiyun /* Device console log buffer state */
109*4882a593Smuzhiyun #define CONSOLE_LINE_MAX	192u
110*4882a593Smuzhiyun #define CONSOLE_BUFFER_MAX	(8 * 1024)
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun #ifdef IDLE_TX_FLOW_MGMT
113*4882a593Smuzhiyun #define IDLE_FLOW_LIST_TIMEOUT 5000
114*4882a593Smuzhiyun #define IDLE_FLOW_RING_TIMEOUT 5000
115*4882a593Smuzhiyun #endif /* IDLE_TX_FLOW_MGMT */
116*4882a593Smuzhiyun 
/* HWA enabled and inited */
#define HWA_ACTIVE(dhd)		(((dhd)->hwa_enable) && ((dhd)->hwa_inited))

/* implicit DMA for h2d wr and d2h rd indices from Host memory to TCM */
#define IDMA_ENAB(dhd)		((dhd)->idma_enable)
#define IDMA_ACTIVE(dhd)	(((dhd)->idma_enable) && ((dhd)->idma_inited))

/* iDMA exists only on PCIe core rev 19 and rev >= 23 */
#define IDMA_CAPABLE(bus)	(((bus)->sih->buscorerev == 19) || ((bus)->sih->buscorerev >= 23))

/* IFRM (Implicit Flow Ring Manager) enable and inited */
#define IFRM_ENAB(dhd)		((dhd)->ifrm_enable)
#define IFRM_ACTIVE(dhd)	(((dhd)->ifrm_enable) && ((dhd)->ifrm_inited))

/* DAR registers used for h2d doorbell */
#define DAR_ENAB(dhd)		((dhd)->dar_enable)
#define DAR_ACTIVE(dhd)		(((dhd)->dar_enable) && ((dhd)->dar_inited))

/* DAR WAR for revs < 64: power request needed around DAR access */
#define DAR_PWRREQ(bus)		(((bus)->_dar_war) && DAR_ACTIVE((bus)->dhd))

/* PCIE CTO (Completion TimeOut) Prevention and Recovery */
#define PCIECTO_ENAB(bus)	((bus)->cto_enable)

/* Implicit DMA index usage :
 * Index 0 for h2d write index transfer
 * Index 1 for d2h read index transfer
 */
#define IDMA_IDX0 0
#define IDMA_IDX1 1
#define IDMA_IDX2 2
#define IDMA_IDX3 3
#define DMA_TYPE_SHIFT	4
#define DMA_TYPE_IDMA	1

/* Number of config-header dwords captured in dhdpcie_config_save_t.header */
#define DHDPCIE_CONFIG_HDR_SIZE 16
#define DHDPCIE_CONFIG_CHECK_DELAY_MS 10 /* 10ms */
#define DHDPCIE_CONFIG_CHECK_RETRY_COUNT 20
#define DHDPCIE_DONGLE_PWR_TOGGLE_DELAY 1000 /* 1ms in units of us */
#define DHDPCIE_PM_D3_DELAY 200000 /* 200ms in units of us */
#define DHDPCIE_PM_D2_DELAY 200 /* 200us */
156*4882a593Smuzhiyun #define DHDPCIE_PM_D2_DELAY 200 /* 200us */
157*4882a593Smuzhiyun 
/* Host-side state for polling the dongle's in-memory console log
 * (see CONSOLE_LINE_MAX / CONSOLE_BUFFER_MAX above).
 */
typedef struct dhd_console {
	 uint		count;	/* Poll interval msec counter */
	 uint		log_addr;		 /* Log struct address (fixed) */
	 hnd_log_t	 log;			 /* Log struct (host copy) */
	 uint		 bufsize;		 /* Size of log buffer */
	 uint8		 *buf;			 /* Log buffer (host copy) */
	 uint		 last;			 /* Last buffer read index */
} dhd_console_t;
166*4882a593Smuzhiyun 
/* Per-ring shared-memory bookkeeping; one entry per common msgring plus
 * one per tx flowring (see dhd_bus::ring_sh below).
 */
typedef struct ring_sh_info {
	uint32 ring_mem_addr;	/* ring memory location (per name; dongle-side addr — TODO confirm) */
	uint32 ring_state_w;	/* write-index state location (per name) */
	uint32 ring_state_r;	/* read-index state location (per name) */
} ring_sh_info_t;
172*4882a593Smuzhiyun 
/* Device-wake signalling mechanism; value held in dhd_bus::dw_option */
#define DEVICE_WAKE_NONE	0
#define DEVICE_WAKE_OOB		1	/* out-of-band device wake */
#define DEVICE_WAKE_INB		2	/* in-band device wake */

#define INBAND_DW_ENAB(bus)		((bus)->dw_option == DEVICE_WAKE_INB)
#define OOB_DW_ENAB(bus)		((bus)->dw_option == DEVICE_WAKE_OOB)
#define NO_DW_ENAB(bus)			((bus)->dw_option == DEVICE_WAKE_NONE)

/* PCIe core revisions that need the firmware-reload workaround */
#define PCIE_RELOAD_WAR_ENAB(buscorerev) \
	((buscorerev == 66) || (buscorerev == 67) || (buscorerev == 68) || (buscorerev == 70))

/*
 * HW JIRA - CRWLPCIEGEN2-672
 * Producer Index Feature which is used by F1 gets reset on F0 FLR
 * fixed in REV68
 */
#define PCIE_ENUM_RESET_WAR_ENAB(buscorerev) \
	((buscorerev == 66) || (buscorerev == 67))
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun struct dhd_bus;
193*4882a593Smuzhiyun 
/* Per-firmware-revision dispatch table: selects the mailbox-data handler
 * appropriate for the dongle firmware's API revision.
 */
struct dhd_pcie_rev {
	uint8	fw_rev;					/* firmware API revision (per name) */
	void (*handle_mb_data)(struct dhd_bus *);	/* D2H mailbox data handler for this rev */
};
198*4882a593Smuzhiyun 
/* Snapshot of the endpoint's PCI configuration space, captured so it can
 * be restored after a reset/link event: standard header plus the PM, PCIe
 * capability, MSI, L1 PM substates, LTR, AER registers and BAR windows.
 */
typedef struct dhdpcie_config_save
{
	uint32 header[DHDPCIE_CONFIG_HDR_SIZE];	/* first 16 dwords of the config header */
	/* pmcsr save */
	uint32 pmcsr;
	/* express save */
	uint32 exp_dev_ctrl_stat;
	uint32 exp_link_ctrl_stat;
	uint32 exp_dev_ctrl_stat2;
	uint32 exp_link_ctrl_stat2;
	/* msi save */
	uint32 msi_cap;
	uint32 msi_addr0;
	uint32 msi_addr1;
	uint32 msi_data;
	/* l1pm save */
	uint32 l1pm0;
	uint32 l1pm1;
	/* ltr save */
	uint32 ltr;
	/* aer save (offsets are within the AER extended capability) */
	uint32 aer_caps_ctrl; /* 0x18 */
	uint32 aer_severity;  /* 0x0C */
	uint32 aer_umask;     /* 0x08 */
	uint32 aer_cmask;     /* 0x14 */
	uint32 aer_root_cmd;  /* 0x2c */
	/* BAR0 and BAR1 windows */
	uint32 bar0_win;
	uint32 bar1_win;
} dhdpcie_config_save_t;
229*4882a593Smuzhiyun 
/* The level of bus communication with the dongle */
enum dhd_bus_low_power_state {
	DHD_BUS_NO_LOW_POWER_STATE,	/* Not in low power state */
	DHD_BUS_D3_INFORM_SENT,		/* D3 INFORM sent */
	DHD_BUS_D3_ACK_RECIEVED,	/* D3 ACK received (identifier spelling kept for compatibility) */
};
236*4882a593Smuzhiyun 
/** Instantiated once for each hardware (dongle) instance that this DHD manages */
typedef struct dhd_bus {
	dhd_pub_t	*dhd;	/**< pointer to per hardware (dongle) unique instance */
	struct pci_dev  *rc_dev;	/* pci RC device handle */
	struct pci_dev  *dev;		/* pci device handle */

	dll_t		flowring_active_list; /* constructed list of tx flowring queues */
#ifdef IDLE_TX_FLOW_MGMT
	uint64		active_list_last_process_ts;
						/* stores the timestamp of active list processing */
#endif /* IDLE_TX_FLOW_MGMT */

	si_t		*sih;			/* Handle for SI calls */
	char		*vars;			/* Variables (from CIS and/or other) */
	uint		varsz;			/* Size of variables buffer */
	uint32		sbaddr;			/* Current SB window pointer (-1, invalid) */
	sbpcieregs_t	*reg;			/* Registers for PCIE core */

	uint		armrev;			/* CPU core revision */
	uint		coreid;			/* CPU core id */
	uint		ramrev;			/* SOCRAM core revision */
	uint32		ramsize;		/* Size of RAM in SOCRAM (bytes) */
	uint32		orig_ramsize;		/* Size of RAM in SOCRAM (bytes) */
	bool		ramsize_adjusted;	/* flag to note adjustment, so that
						 * adjustment routine and file io
						 * are avoided on D3 cold -> D0
						 */
	uint32		srmemsize;		/* Size of SRMEM */

	uint32		bus;			/* gSPI or SDIO bus */
	uint32		intstatus;		/* Intstatus bits (events) pending */
	bool		dpc_sched;		/* Indicates DPC schedule (intrpt rcvd) */
	bool		fcstate;		/* State of dongle flow-control */

	uint16		cl_devid;		/* cached devid for dhdsdio_probe_attach() */
	char		*fw_path;		/* module_param: path to firmware image */
	char		*nv_path;		/* module_param: path to nvram vars file */

	struct pktq	txq;			/* Queue length used for flow-control */

	bool		intr;			/* Use interrupts */
	bool		ipend;			/* Device interrupt is pending */
	bool		intdis;			/* Interrupts disabled by isr */
	uint		intrcount;		/* Count of device interrupt callbacks */
	uint		lastintrs;		/* Count as of last watchdog timer */

	dhd_console_t	console;		/* Console output polling support */
	uint		console_addr;		/* Console address from shared struct */

	bool		alp_only;		/* Don't use HT clock (ALP only) */

	bool		remap;		/* Contiguous 1MB RAM: 512K socram + 512K devram
					 * Available with socram rev 16
					 * Remap region not DMA-able
					 */
	uint32		resetinstr;		/* reset instruction (per name — TODO confirm usage) */
	uint32		dongle_ram_base;	/* base address of dongle RAM */

	ulong		shared_addr;		/* dongle address of the shared structure */
	pciedev_shared_t	*pcie_sh;	/* host copy of the PCIe shared structure */
	uint32		dma_rxoffset;
	volatile char	*regs;		/* pci device memory va */
	volatile char	*tcm;		/* pci device memory va */
	osl_t		*osh;		/* OS abstraction layer handle */
	uint32		nvram_csm;	/* Nvram checksum */
	uint16		pollrate;	/* console/interrupt poll rate (per name) */
	uint16  polltick;

	volatile uint32  *pcie_mb_intr_addr;	/* mapped mailbox/doorbell register (per name) */
	volatile uint32  *pcie_mb_intr_2_addr;	/* second doorbell register (per name) */
	void    *pcie_mb_intr_osh;
	bool	sleep_allowed;

	wake_counts_t	wake_counts;	/* host-wake reason statistics */

	/* version 3 shared struct related info start */
	ring_sh_info_t	ring_sh[BCMPCIE_COMMON_MSGRINGS + MAX_DHD_TX_FLOWS];

	uint8	h2d_ring_count;
	uint8	d2h_ring_count;
	uint32  ringmem_ptr;
	uint32  ring_state_ptr;

	uint32 d2h_dma_scratch_buffer_mem_addr;

	uint32 h2d_mb_data_ptr_addr;
	uint32 d2h_mb_data_ptr_addr;
	/* version 3 shared struct related info end */

	uint32 def_intmask;		/* default interrupt mask */
	uint32 d2h_mb_mask;
	uint32 pcie_mailbox_mask;
	uint32 pcie_mailbox_int;
	bool	ltrsleep_on_unload;
	uint	wait_for_d3_ack;
	uint16	max_tx_flowrings;
	uint16	max_submission_rings;
	uint16	max_completion_rings;
	uint16	max_cmn_rings;
	uint32	rw_index_sz;		/* size of a read/write ring index */
	bool	db1_for_mb;		/* use doorbell 1 for mailbox signalling */

	dhd_timeout_t doorbell_timer;
	bool	device_wake_state;
	bool	irq_registered;
	bool	d2h_intr_method;	/* NOTE(review): declared bool but presumably holds
					 * PCIE_INTX (0) / PCIE_MSI (1) — confirm in dhd_pcie.c
					 */
#ifdef SUPPORT_LINKDOWN_RECOVERY
#if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \
	defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
	defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820))
#ifdef CONFIG_ARCH_MSM
	uint8 no_cfg_restore;
#endif /* CONFIG_ARCH_MSM */
	struct_pcie_register_event pcie_event;	/* platform PCIe link-event registration */
#endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY &&
	* (CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 ||
	* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 ))
	*/
	bool read_shm_fail;
#endif /* SUPPORT_LINKDOWN_RECOVERY */
	int32 idletime;                 /* Control for activity timeout */
#ifdef DHD_PCIE_RUNTIMEPM
	int32 idlecount;                /* Activity timeout counter */
	int32 bus_wake;                 /* For wake up the bus */
	bool runtime_resume_done;       /* For check runtime suspend end */
	struct mutex pm_lock;            /* Synchronize for system PM & runtime PM */
	wait_queue_head_t rpm_queue;    /* wait-queue for bus wake up */
#endif /* DHD_PCIE_RUNTIMEPM */
	uint32 d3_inform_cnt;		/* number of D3 INFORMs sent */
	uint32 d0_inform_cnt;		/* number of D0 INFORMs sent */
	uint32 d0_inform_in_use_cnt;
	uint8 force_suspend;
	uint8 is_linkdown;		/* set when a PCIe link-down was detected */
	uint8 no_bus_init;
#ifdef IDLE_TX_FLOW_MGMT
	bool enable_idle_flowring_mgmt;
#endif /* IDLE_TX_FLOW_MGMT */
	struct	dhd_pcie_rev api;	/* firmware-revision dispatch (see struct dhd_pcie_rev) */
	bool use_mailbox;
	bool    use_d0_inform;
	void	*bus_lock;
	void *backplane_access_lock;
	enum dhd_bus_low_power_state bus_low_power_state;
	uint32  hostready_count; /* Number of hostready issued */
#if defined(BCMPCIE_OOB_HOST_WAKE)
	bool	oob_presuspend;
#endif // endif
	dhdpcie_config_save_t saved_config;	/* PCI config space saved across reset */
	/* interrupt enable/disable bookkeeping, for debugging */
	ulong resume_intr_enable_count;
	ulong dpc_intr_enable_count;
	ulong isr_intr_disable_count;
	ulong suspend_intr_disable_count;
	ulong dpc_return_busdown_count;
	ulong non_ours_irq_count;
#ifdef BCMPCIE_OOB_HOST_WAKE
	ulong oob_intr_count;
	ulong oob_intr_enable_count;
	ulong oob_intr_disable_count;
	uint64 last_oob_irq_time;
	uint64 last_oob_irq_enable_time;
	uint64 last_oob_irq_disable_time;
#endif /* BCMPCIE_OOB_HOST_WAKE */
	/* timestamps of notable bus events, for debugging */
	uint64 isr_entry_time;
	uint64 isr_exit_time;
	uint64 dpc_sched_time;
	uint64 dpc_entry_time;
	uint64 dpc_exit_time;
	uint64 resched_dpc_time;
	uint64 last_d3_inform_time;
	uint64 last_process_ctrlbuf_time;
	uint64 last_process_flowring_time;
	uint64 last_process_txcpl_time;
	uint64 last_process_rxcpl_time;
	uint64 last_process_infocpl_time;
	uint64 last_process_edl_time;
	uint64 last_suspend_start_time;
	uint64 last_suspend_end_time;
	uint64 last_resume_start_time;
	uint64 last_resume_end_time;
	uint64 last_non_ours_irq_time;
	uint8 hwa_enab_bmap;
	bool  idma_enabled;		/* implicit DMA (see IDMA_* macros) */
	bool  ifrm_enabled;		/* implicit flow ring manager (see IFRM_* macros) */
	bool  dar_enabled;		/* DAR doorbell registers (see DAR_* macros) */
	uint32 dmaxfer_complete;
	uint8	dw_option;		/* DEVICE_WAKE_NONE / _OOB / _INB */
#ifdef DHD_PCIE_RUNTIMEPM
	bool chk_pm;	/* To avoid counting of wake up from Runtime PM */
#endif /* DHD_PCIE_RUNTIMEPM */
	bool _dar_war;			/* DAR workaround needed (see DAR_PWRREQ) */
	uint8  dma_chan;
	bool	cto_enable;	/* enable PCIE CTO Prevention and recovery */
	uint32  cto_threshold;  /* PCIE CTO timeout threshold */
	bool	cto_triggered;	/* CTO is triggered */
	int	pwr_req_ref;	/* power-request reference count */
	bool flr_force_fail; /* user intends to simulate flr force fail */
	bool intr_enabled; /* ready to receive interrupts from dongle */
	bool force_bt_quiesce; /* send bt_quiesce command to BT driver. */
#if defined(DHD_H2D_LOG_TIME_SYNC)
	ulong dhd_rte_time_sync_count; /* OSL_SYSUPTIME_US() */
#endif /* DHD_H2D_LOG_TIME_SYNC */
	bool rc_ep_aspm_cap; /* RC and EP ASPM capable */
	bool rc_ep_l1ss_cap; /* RC and EP L1SS capable */
	uint16 hp2p_txcpl_max_items;
	uint16 hp2p_rxcpl_max_items;
	/* PCIE coherent status */
	uint32 coherent_state;
} dhd_bus_t;
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun #ifdef DHD_MSI_SUPPORT
447*4882a593Smuzhiyun extern uint enable_msi;
448*4882a593Smuzhiyun #endif /* DHD_MSI_SUPPORT */
449*4882a593Smuzhiyun 
/* Interrupt delivery mechanisms; presumably the value stored in
 * dhd_bus::d2h_intr_method — confirm against dhd_pcie.c.
 */
enum {
	PCIE_INTX = 0,	/* legacy pin-based (INTx) interrupts */
	PCIE_MSI = 1	/* Message Signaled Interrupts */
};
454*4882a593Smuzhiyun 
455*4882a593Smuzhiyun /* function declarations */
456*4882a593Smuzhiyun 
457*4882a593Smuzhiyun extern uint32* dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size);
458*4882a593Smuzhiyun extern int dhdpcie_bus_register(void);
459*4882a593Smuzhiyun extern void dhdpcie_bus_unregister(void);
460*4882a593Smuzhiyun extern bool dhdpcie_chipmatch(uint16 vendor, uint16 device);
461*4882a593Smuzhiyun 
462*4882a593Smuzhiyun extern int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr,
463*4882a593Smuzhiyun 	volatile char *regs, volatile char *tcm, void *pci_dev);
464*4882a593Smuzhiyun extern uint32 dhdpcie_bus_cfg_read_dword(struct dhd_bus *bus, uint32 addr, uint32 size);
465*4882a593Smuzhiyun extern void dhdpcie_bus_cfg_write_dword(struct dhd_bus *bus, uint32 addr, uint32 size, uint32 data);
466*4882a593Smuzhiyun extern void dhdpcie_bus_intr_enable(struct dhd_bus *bus);
467*4882a593Smuzhiyun extern void dhdpcie_bus_intr_disable(struct dhd_bus *bus);
468*4882a593Smuzhiyun extern int dhpcie_bus_mask_interrupt(dhd_bus_t *bus);
469*4882a593Smuzhiyun extern void dhdpcie_bus_release(struct dhd_bus *bus);
470*4882a593Smuzhiyun extern int32 dhdpcie_bus_isr(struct dhd_bus *bus);
471*4882a593Smuzhiyun extern void dhdpcie_free_irq(dhd_bus_t *bus);
472*4882a593Smuzhiyun extern void dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value);
473*4882a593Smuzhiyun extern void dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake);
474*4882a593Smuzhiyun extern void dhdpcie_dongle_reset(dhd_bus_t *bus);
475*4882a593Smuzhiyun #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
476*4882a593Smuzhiyun extern int dhdpcie_bus_suspend(struct  dhd_bus *bus, bool state, bool byint);
477*4882a593Smuzhiyun #else
478*4882a593Smuzhiyun extern int dhdpcie_bus_suspend(struct  dhd_bus *bus, bool state);
479*4882a593Smuzhiyun #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
480*4882a593Smuzhiyun extern int dhdpcie_pci_suspend_resume(struct  dhd_bus *bus, bool state);
481*4882a593Smuzhiyun extern uint32 dhdpcie_force_alp(struct dhd_bus *bus, bool enable);
482*4882a593Smuzhiyun extern uint32 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int force_l1_entry_time);
483*4882a593Smuzhiyun extern bool dhdpcie_tcm_valid(dhd_bus_t *bus);
484*4882a593Smuzhiyun extern void dhdpcie_pme_active(osl_t *osh, bool enable);
485*4882a593Smuzhiyun extern bool dhdpcie_pme_cap(osl_t *osh);
486*4882a593Smuzhiyun extern uint32 dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val);
487*4882a593Smuzhiyun extern void dhdpcie_set_pmu_min_res_mask(struct dhd_bus *bus, uint min_res_mask);
488*4882a593Smuzhiyun extern uint8 dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val);
489*4882a593Smuzhiyun extern int dhdpcie_disable_irq(dhd_bus_t *bus);
490*4882a593Smuzhiyun extern int dhdpcie_disable_irq_nosync(dhd_bus_t *bus);
491*4882a593Smuzhiyun extern int dhdpcie_enable_irq(dhd_bus_t *bus);
492*4882a593Smuzhiyun 
493*4882a593Smuzhiyun extern void dhd_bus_dump_dar_registers(struct dhd_bus *bus);
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun extern uint32 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset);
496*4882a593Smuzhiyun extern uint32 dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
497*4882a593Smuzhiyun 		bool is_write, uint32 writeval);
498*4882a593Smuzhiyun extern uint32 dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
499*4882a593Smuzhiyun 		bool is_write, uint32 writeval);
500*4882a593Smuzhiyun extern uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus);
501*4882a593Smuzhiyun extern int dhdpcie_start_host_pcieclock(dhd_bus_t *bus);
502*4882a593Smuzhiyun extern int dhdpcie_stop_host_pcieclock(dhd_bus_t *bus);
503*4882a593Smuzhiyun extern int dhdpcie_disable_device(dhd_bus_t *bus);
504*4882a593Smuzhiyun extern int dhdpcie_alloc_resource(dhd_bus_t *bus);
505*4882a593Smuzhiyun extern void dhdpcie_free_resource(dhd_bus_t *bus);
506*4882a593Smuzhiyun extern void dhdpcie_dump_resource(dhd_bus_t *bus);
507*4882a593Smuzhiyun extern int dhdpcie_bus_request_irq(struct dhd_bus *bus);
508*4882a593Smuzhiyun void dhdpcie_os_setbar1win(dhd_bus_t *bus, uint32 addr);
509*4882a593Smuzhiyun void dhdpcie_os_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
510*4882a593Smuzhiyun uint8 dhdpcie_os_rtcm8(dhd_bus_t *bus, ulong offset);
511*4882a593Smuzhiyun void dhdpcie_os_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
512*4882a593Smuzhiyun uint16 dhdpcie_os_rtcm16(dhd_bus_t *bus, ulong offset);
513*4882a593Smuzhiyun void dhdpcie_os_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
514*4882a593Smuzhiyun uint32 dhdpcie_os_rtcm32(dhd_bus_t *bus, ulong offset);
515*4882a593Smuzhiyun #ifdef DHD_SUPPORT_64BIT
516*4882a593Smuzhiyun void dhdpcie_os_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
517*4882a593Smuzhiyun uint64 dhdpcie_os_rtcm64(dhd_bus_t *bus, ulong offset);
518*4882a593Smuzhiyun #endif // endif
519*4882a593Smuzhiyun 
520*4882a593Smuzhiyun extern int dhdpcie_enable_device(dhd_bus_t *bus);
521*4882a593Smuzhiyun 
522*4882a593Smuzhiyun #ifdef BCMPCIE_OOB_HOST_WAKE
523*4882a593Smuzhiyun extern int dhdpcie_oob_intr_register(dhd_bus_t *bus);
524*4882a593Smuzhiyun extern void dhdpcie_oob_intr_unregister(dhd_bus_t *bus);
525*4882a593Smuzhiyun extern void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable);
526*4882a593Smuzhiyun extern int dhdpcie_get_oob_irq_num(struct dhd_bus *bus);
527*4882a593Smuzhiyun extern int dhdpcie_get_oob_irq_status(struct dhd_bus *bus);
528*4882a593Smuzhiyun extern int dhdpcie_get_oob_irq_level(void);
529*4882a593Smuzhiyun #endif /* BCMPCIE_OOB_HOST_WAKE */
530*4882a593Smuzhiyun 
531*4882a593Smuzhiyun #if defined(CONFIG_ARCH_EXYNOS)
532*4882a593Smuzhiyun #define SAMSUNG_PCIE_VENDOR_ID 0x144d
533*4882a593Smuzhiyun #if defined(CONFIG_MACH_UNIVERSAL5433)
534*4882a593Smuzhiyun #define SAMSUNG_PCIE_DEVICE_ID 0xa5e3
535*4882a593Smuzhiyun #define SAMSUNG_PCIE_CH_NUM
536*4882a593Smuzhiyun #elif defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS7420)
537*4882a593Smuzhiyun #define SAMSUNG_PCIE_DEVICE_ID 0xa575
538*4882a593Smuzhiyun #define SAMSUNG_PCIE_CH_NUM 1
539*4882a593Smuzhiyun #elif defined(CONFIG_SOC_EXYNOS8890)
540*4882a593Smuzhiyun #define SAMSUNG_PCIE_DEVICE_ID 0xa544
541*4882a593Smuzhiyun #define SAMSUNG_PCIE_CH_NUM 0
542*4882a593Smuzhiyun #elif defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
543*4882a593Smuzhiyun 	defined(CONFIG_SOC_EXYNOS9820)
544*4882a593Smuzhiyun #define SAMSUNG_PCIE_DEVICE_ID 0xecec
545*4882a593Smuzhiyun #define SAMSUNG_PCIE_CH_NUM 0
546*4882a593Smuzhiyun #else
547*4882a593Smuzhiyun #error "Not supported platform"
548*4882a593Smuzhiyun #endif /* CONFIG_SOC_EXYNOSXXXX & CONFIG_MACH_UNIVERSALXXXX */
549*4882a593Smuzhiyun #endif /* CONFIG_ARCH_EXYNOS */
550*4882a593Smuzhiyun 
/* Qualcomm MSM PCIe root-complex identification: fixed vendor ID, with the
 * RC device ID selected per SoC. USE_CUSTOM_MSM_PCIE lets board configs
 * inject their own ID via MSM_PCIE_CUSTOM_DEVICE_ID.
 */
#if defined(CONFIG_ARCH_MSM)
#define MSM_PCIE_VENDOR_ID 0x17cb
#if defined(CONFIG_ARCH_APQ8084)
#define MSM_PCIE_DEVICE_ID 0x0101
#elif defined(CONFIG_ARCH_MSM8994)
#define MSM_PCIE_DEVICE_ID 0x0300
#elif defined(CONFIG_ARCH_MSM8996)
#define MSM_PCIE_DEVICE_ID 0x0104
#elif defined(CONFIG_ARCH_MSM8998)
#define MSM_PCIE_DEVICE_ID 0x0105
#elif defined(CONFIG_ARCH_SDM845) || defined(CONFIG_ARCH_SM8150)
#define MSM_PCIE_DEVICE_ID 0x0106
#elif defined(USE_CUSTOM_MSM_PCIE)
#define MSM_PCIE_DEVICE_ID MSM_PCIE_CUSTOM_DEVICE_ID
#else
#error "Not supported platform"
#endif /* MSM SoC device-ID selection */
#endif /* CONFIG_ARCH_MSM */
569*4882a593Smuzhiyun 
/* Intel x86 host-bridge PCIe root-complex IDs. */
#if defined(CONFIG_X86)
#define X86_PCIE_VENDOR_ID 0x8086
#define X86_PCIE_DEVICE_ID 0x9c1a
#endif /* CONFIG_X86 */

/* NVIDIA Tegra PCIe root-complex IDs (Broadcom vendor ID on this RC). */
#if defined(CONFIG_ARCH_TEGRA)
#define TEGRA_PCIE_VENDOR_ID 0x14e4
#define TEGRA_PCIE_DEVICE_ID 0x4347
#endif /* CONFIG_ARCH_TEGRA */

/* HiSilicon PCIe root-complex IDs for the HiKey development board. */
#if defined(BOARD_HIKEY)
#define HIKEY_PCIE_VENDOR_ID 0x19e5
#define HIKEY_PCIE_DEVICE_ID 0x3660
#endif /* BOARD_HIKEY */

/* Fallback wildcard IDs used when no supported platform is configured. */
#define DUMMY_PCIE_VENDOR_ID 0xffff
#define DUMMY_PCIE_DEVICE_ID 0xffff
587*4882a593Smuzhiyun 
/* Map the per-platform IDs above onto the generic PCIE_RC_VENDOR_ID /
 * PCIE_RC_DEVICE_ID names the driver actually uses; unknown platforms
 * fall back to the dummy wildcard IDs.
 */
#if defined(CONFIG_ARCH_EXYNOS)
#define PCIE_RC_VENDOR_ID SAMSUNG_PCIE_VENDOR_ID
#define PCIE_RC_DEVICE_ID SAMSUNG_PCIE_DEVICE_ID
#elif defined(CONFIG_ARCH_MSM)
#define PCIE_RC_VENDOR_ID MSM_PCIE_VENDOR_ID
#define PCIE_RC_DEVICE_ID MSM_PCIE_DEVICE_ID
#elif defined(CONFIG_X86)
#define PCIE_RC_VENDOR_ID X86_PCIE_VENDOR_ID
#define PCIE_RC_DEVICE_ID X86_PCIE_DEVICE_ID
#elif defined(CONFIG_ARCH_TEGRA)
#define PCIE_RC_VENDOR_ID TEGRA_PCIE_VENDOR_ID
#define PCIE_RC_DEVICE_ID TEGRA_PCIE_DEVICE_ID
#elif defined(BOARD_HIKEY)
#define PCIE_RC_VENDOR_ID HIKEY_PCIE_VENDOR_ID
#define PCIE_RC_DEVICE_ID HIKEY_PCIE_DEVICE_ID
#else
/* Use dummy vendor and device IDs */
#define PCIE_RC_VENDOR_ID DUMMY_PCIE_VENDOR_ID
#define PCIE_RC_DEVICE_ID DUMMY_PCIE_DEVICE_ID
#endif /* CONFIG_ARCH_EXYNOS */
608*4882a593Smuzhiyun 
/* Flow-ring classification: regular data ring vs. HP2P
 * (high-priority peer-to-peer) ring.
 */
#define DHD_REGULAR_RING    0
#define DHD_HP2P_RING    1
611*4882a593Smuzhiyun 
/* Exynos root-complex power-management hooks. Universal5433 exposes a
 * channel-less API; all other Exynos SoCs take the RC channel number
 * (SAMSUNG_PCIE_CH_NUM above).
 */
#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
#ifdef CONFIG_MACH_UNIVERSAL5433
extern int exynos_pcie_pm_suspend(void);
extern int exynos_pcie_pm_resume(void);
#else
extern int exynos_pcie_pm_suspend(int ch_num);
extern int exynos_pcie_pm_resume(int ch_num);
#endif /* CONFIG_MACH_UNIVERSAL5433 */
#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */
621*4882a593Smuzhiyun 
/* Tegra root-complex power-management hooks, available from kernel 4.18. */
#ifdef CONFIG_ARCH_TEGRA
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0))
extern int tegra_pcie_pm_suspend(void);
extern int tegra_pcie_pm_resume(void);
#endif /* KERNEL_VERSION >= 4.18.0 */
#endif /* CONFIG_ARCH_TEGRA */
628*4882a593Smuzhiyun 
/* Dump the dongle-side buzzz trace log. */
extern int dhd_buzzz_dump_dngl(dhd_bus_t *bus);
/* Idle TX flow-ring management: suspend/resume idle flow rings and
 * maintain the active-ring list ordering.
 */
#ifdef IDLE_TX_FLOW_MGMT
extern int dhd_bus_flow_ring_resume_request(struct dhd_bus *bus, void *arg);
extern void dhd_bus_flow_ring_resume_response(struct dhd_bus *bus, uint16 flowid, int32 status);
extern int dhd_bus_flow_ring_suspend_request(struct dhd_bus *bus, void *arg);
extern void dhd_bus_flow_ring_suspend_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
extern void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus,
	flow_ring_node_t *flow_ring_node);
extern void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus,
	flow_ring_node_t *flow_ring_node);
extern void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus,
	flow_ring_node_t *flow_ring_node);
/* Lock-free variant; presumably caller holds the list lock — TODO confirm. */
extern void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus,
	flow_ring_node_t *flow_ring_node);
#endif /* IDLE_TX_FLOW_MGMT */

/* Send host-to-dongle mailbox data. */
extern int dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data);

/* Host-wake accounting: total wake count and flag-controlled get/set. */
#ifdef DHD_WAKE_STATUS
int bcmpcie_get_total_wake(struct dhd_bus *bus);
int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag);
#endif /* DHD_WAKE_STATUS */
/* Hostready doorbell support query/assert, deep-wake option, IRQ state. */
extern bool dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus);
extern void dhd_bus_hostready(struct  dhd_bus *bus);
extern void dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option);
extern int dhdpcie_irq_disabled(struct dhd_bus *bus);
655*4882a593Smuzhiyun 
dhdpcie_is_arm_halted(struct dhd_bus * bus)656*4882a593Smuzhiyun static INLINE bool dhdpcie_is_arm_halted(struct dhd_bus *bus) {return TRUE;}
dhd_os_wifi_platform_set_power(uint32 value)657*4882a593Smuzhiyun static INLINE int dhd_os_wifi_platform_set_power(uint32 value) {return BCME_OK; }
658*4882a593Smuzhiyun static INLINE void
dhdpcie_dongle_flr_or_pwr_toggle(dhd_bus_t * bus)659*4882a593Smuzhiyun dhdpcie_dongle_flr_or_pwr_toggle(dhd_bus_t *bus)
660*4882a593Smuzhiyun { return; }
661*4882a593Smuzhiyun 
/* PCIe config-space sanity check, save/restore across suspend (optionally
 * including PMCSR), and D-state control.
 */
int dhdpcie_config_check(dhd_bus_t *bus);
int dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr);
int dhdpcie_config_save(dhd_bus_t *bus);
int dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state);

/* Capability queries: HWA, implicit DMA, IFRM, and DAR support. */
extern bool dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus);
extern bool dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus);
extern bool dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus);
extern bool dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus);
671*4882a593Smuzhiyun 
672*4882a593Smuzhiyun static INLINE uint32
dhd_pcie_config_read(osl_t * osh,uint offset,uint size)673*4882a593Smuzhiyun dhd_pcie_config_read(osl_t *osh, uint offset, uint size)
674*4882a593Smuzhiyun {
675*4882a593Smuzhiyun 	OSL_DELAY(100);
676*4882a593Smuzhiyun 	return OSL_PCI_READ_CONFIG(osh, offset, size);
677*4882a593Smuzhiyun }
678*4882a593Smuzhiyun 
/*
 * Indirect read of a PCIe core register: first write the target address
 * @val into configaddr, then read the value back from configdata.
 * The two si_corereg() calls must stay in this order.
 */
static INLINE uint32
dhd_pcie_corereg_read(si_t *sih, uint val)
{
	/* settle delay before the indirect access */
	OSL_DELAY(100);
	/* select the register: mask ~0 writes all bits of 'val' to configaddr */
	si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, val);
	/* mask 0, val 0 => pure read of the selected register via configdata */
	return si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), 0, 0);
}
686*4882a593Smuzhiyun 
/* Resolve firmware/NVRAM/CLM/txcap file paths from OTP contents. */
extern int dhdpcie_get_fwpath_otp(dhd_bus_t *bus, char *fw_path, char *nv_path,
		char *clm_path, char *txcap_path);

/* Debug aids: PCIe state dump, interrupt counter dump, and clearing of
 * the bus interrupt status.
 */
extern int dhd_pcie_debug_info_dump(dhd_pub_t *dhd);
extern void dhd_pcie_intr_count_dump(dhd_pub_t *dhd);
extern void dhdpcie_bus_clear_intstatus(dhd_bus_t *bus);
/* Max ring size for HP2P TX (tx=TRUE) or RX (tx=FALSE) rings. */
#ifdef DHD_HP2P
extern uint16 dhd_bus_get_hp2p_ring_max_size(dhd_bus_t *bus, bool tx);
#endif /* DHD_HP2P */
696*4882a593Smuzhiyun 
697*4882a593Smuzhiyun #endif /* dhd_pcie_h */
698