xref: /OK3568_Linux_fs/external/rkwifibt/drivers/infineon/dhd_pcie.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * Linux DHD Bus Module for PCIE
3  *
4  * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
5  *
6  * Copyright (C) 1999-2017, Broadcom Corporation
7  *
8  *      Unless you and Broadcom execute a separate written software license
9  * agreement governing use of this software, this software is licensed to you
10  * under the terms of the GNU General Public License version 2 (the "GPL"),
11  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12  * following added to such license:
13  *
14  *      As a special exception, the copyright holders of this software give you
15  * permission to link this software with independent modules, and to copy and
16  * distribute the resulting executable under terms of your choice, provided that
17  * you also meet, for each linked independent module, the terms and conditions of
18  * the license of that module.  An independent module is a module which is not
19  * derived from this software.  The special exception does not apply to any
20  * modifications of the software.
21  *
22  *      Notwithstanding the above, under no circumstances may you combine this
23  * software in any way with any other Broadcom software provided under a license
24  * other than the GPL, without Broadcom's express prior written consent.
25  *
26  *
27  * <<Broadcom-WL-IPTag/Open:>>
28  *
29  * $Id: dhd_pcie.h 698652 2017-05-10 10:39:24Z $
30  */
31 
32 #ifndef dhd_pcie_h
33 #define dhd_pcie_h
34 
35 #include <bcmpcie.h>
36 #include <hnd_cons.h>
37 #ifdef SUPPORT_LINKDOWN_RECOVERY
38 #ifdef CONFIG_ARCH_MSM
39 #ifdef CONFIG_PCI_MSM
40 #include <linux/msm_pcie.h>
41 #else
42 #include <mach/msm_pcie.h>
43 #endif /* CONFIG_PCI_MSM */
44 #endif /* CONFIG_ARCH_MSM */
45 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
46 #if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
47 	defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820)
48 #include <linux/exynos-pci-noti.h>
49 extern int exynos_pcie_register_event(struct exynos_pcie_register_event *reg);
50 extern int exynos_pcie_deregister_event(struct exynos_pcie_register_event *reg);
51 #endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895
52 	* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820
53 	*/
54 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
55 #endif /* SUPPORT_LINKDOWN_RECOVERY */
56 
57 #ifdef DHD_PCIE_RUNTIMEPM
58 #include <linux/mutex.h>
59 #include <linux/wait.h>
60 
61 #define DEFAULT_DHD_RUNTIME_MS 100
62 #ifndef CUSTOM_DHD_RUNTIME_MS
63 #define CUSTOM_DHD_RUNTIME_MS DEFAULT_DHD_RUNTIME_MS
64 #endif /* CUSTOM_DHD_RUNTIME_MS */
65 
66 #ifndef MAX_IDLE_COUNT
67 #define MAX_IDLE_COUNT 16
68 #endif /* MAX_IDLE_COUNT */
69 
70 #ifndef MAX_RESUME_WAIT
71 #define MAX_RESUME_WAIT 100
72 #endif /* MAX_RESUME_WAIT */
73 #endif /* DHD_PCIE_RUNTIMEPM */
74 
75 /* defines */
76 #define PCIE_SHARED_VERSION		PCIE_SHARED_VERSION_7
77 
78 #define PCMSGBUF_HDRLEN 0
79 #define DONGLE_REG_MAP_SIZE (32 * 1024)
80 #define DONGLE_TCM_MAP_SIZE (4096 * 1024)
81 #define DONGLE_MIN_MEMSIZE (128 *1024)
82 #ifdef DHD_DEBUG
83 #define DHD_PCIE_SUCCESS 0
84 #define DHD_PCIE_FAILURE 1
85 #endif /* DHD_DEBUG */
86 #define	REMAP_ENAB(bus)			((bus)->remap)
87 #define	REMAP_ISADDR(bus, a)		(((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))
88 
89 #ifdef SUPPORT_LINKDOWN_RECOVERY
90 #ifdef CONFIG_ARCH_MSM
91 #define struct_pcie_notify		struct msm_pcie_notify
92 #define struct_pcie_register_event	struct msm_pcie_register_event
93 #endif /* CONFIG_ARCH_MSM */
94 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
95 #if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
96 	defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820)
97 #define struct_pcie_notify		struct exynos_pcie_notify
98 #define struct_pcie_register_event	struct exynos_pcie_register_event
99 #endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895
100 	* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820
101 	*/
102 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
103 #endif /* SUPPORT_LINKDOWN_RECOVERY */
104 
105 #define MAX_DHD_TX_FLOWS	320
106 
107 /* user defined data structures */
108 /* Device console log buffer state */
109 #define CONSOLE_LINE_MAX	192u
110 #define CONSOLE_BUFFER_MAX	(8 * 1024)
111 
112 #ifdef IDLE_TX_FLOW_MGMT
113 #define IDLE_FLOW_LIST_TIMEOUT 5000
114 #define IDLE_FLOW_RING_TIMEOUT 5000
115 #endif /* IDLE_TX_FLOW_MGMT */
116 
117 /* HWA enabled and inited */
118 #define HWA_ACTIVE(dhd)		(((dhd)->hwa_enable) && ((dhd)->hwa_inited))
119 
120 /* implicit DMA for h2d wr and d2h rd indice from Host memory to TCM */
121 #define IDMA_ENAB(dhd)		((dhd)->idma_enable)
122 #define IDMA_ACTIVE(dhd)	(((dhd)->idma_enable) && ((dhd)->idma_inited))
123 
124 #define IDMA_CAPABLE(bus)	(((bus)->sih->buscorerev == 19) || ((bus)->sih->buscorerev >= 23))
125 
/* IFRM (Implicit Flow Ring Manager) enable and inited */
127 #define IFRM_ENAB(dhd)		((dhd)->ifrm_enable)
128 #define IFRM_ACTIVE(dhd)	(((dhd)->ifrm_enable) && ((dhd)->ifrm_inited))
129 
130 /* DAR registers use for h2d doorbell */
131 #define DAR_ENAB(dhd)		((dhd)->dar_enable)
132 #define DAR_ACTIVE(dhd)		(((dhd)->dar_enable) && ((dhd)->dar_inited))
133 
134 /* DAR WAR for revs < 64 */
135 #define DAR_PWRREQ(bus)		(((bus)->_dar_war) && DAR_ACTIVE((bus)->dhd))
136 
137 /* PCIE CTO Prevention and Recovery */
138 #define PCIECTO_ENAB(bus)	((bus)->cto_enable)
139 
140 /* Implicit DMA index usage :
141  * Index 0 for h2d write index transfer
142  * Index 1 for d2h read index transfer
143  */
144 #define IDMA_IDX0 0
145 #define IDMA_IDX1 1
146 #define IDMA_IDX2 2
147 #define IDMA_IDX3 3
148 #define DMA_TYPE_SHIFT	4
149 #define DMA_TYPE_IDMA	1
150 
151 #define DHDPCIE_CONFIG_HDR_SIZE 16
152 #define DHDPCIE_CONFIG_CHECK_DELAY_MS 10 /* 10ms */
153 #define DHDPCIE_CONFIG_CHECK_RETRY_COUNT 20
154 #define DHDPCIE_DONGLE_PWR_TOGGLE_DELAY 1000 /* 1ms in units of us */
155 #define DHDPCIE_PM_D3_DELAY 200000 /* 200ms in units of us */
156 #define DHDPCIE_PM_D2_DELAY 200 /* 200us */
157 
/* Host-side state for polling the dongle's console log: a copy of the
 * dongle's hnd_log_t descriptor plus the host buffer it is drained into.
 */
typedef struct dhd_console {
	 uint		count;	/* Poll interval msec counter */
	 uint		log_addr;		 /* Log struct address (fixed) */
	 hnd_log_t	 log;			 /* Log struct (host copy) */
	 uint		 bufsize;		 /* Size of log buffer */
	 uint8		 *buf;			 /* Log buffer (host copy) */
	 uint		 last;			 /* Last buffer read index */
} dhd_console_t;
166 
/* Per-ring shared-memory bookkeeping cached on the host.
 * NOTE(review): values appear to be dongle-side addresses of the ring
 * memory descriptor and its write/read state words -- confirm against
 * the users of ring_sh[] in dhd_pcie.c.
 */
typedef struct ring_sh_info {
	uint32 ring_mem_addr;	/* address of the ring memory descriptor */
	uint32 ring_state_w;	/* address of the ring's write-index state */
	uint32 ring_state_r;	/* address of the ring's read-index state */
} ring_sh_info_t;
172 
173 #define DEVICE_WAKE_NONE	0
174 #define DEVICE_WAKE_OOB		1
175 #define DEVICE_WAKE_INB		2
176 
177 #define INBAND_DW_ENAB(bus)		((bus)->dw_option == DEVICE_WAKE_INB)
178 #define OOB_DW_ENAB(bus)		((bus)->dw_option == DEVICE_WAKE_OOB)
179 #define NO_DW_ENAB(bus)			((bus)->dw_option == DEVICE_WAKE_NONE)
180 
/* FW-reload WAR applies on these PCIe core revisions.
 * Macro hygiene: the parameter is parenthesized at every use so that
 * expression arguments expand safely.
 */
#define PCIE_RELOAD_WAR_ENAB(buscorerev) \
	(((buscorerev) == 66) || ((buscorerev) == 67) || ((buscorerev) == 68) || ((buscorerev) == 70))

/*
 * HW JIRA - CRWLPCIEGEN2-672
 * Producer Index Feature which is used by F1 gets reset on F0 FLR
 * fixed in REV68
 */
#define PCIE_ENUM_RESET_WAR_ENAB(buscorerev) \
	(((buscorerev) == 66) || ((buscorerev) == 67))
191 
192 struct dhd_bus;
193 
/* Firmware-revision dispatch entry: selects the dongle-to-host mailbox
 * data handler appropriate for the firmware's PCIe interface revision.
 */
struct dhd_pcie_rev {
	uint8	fw_rev;					/* firmware revision this entry applies to */
	void (*handle_mb_data)(struct dhd_bus *);	/* D2H mailbox-data handler */
};
198 
/* Snapshot of the endpoint's PCI configuration space, saved before
 * suspend/link-down and restored afterwards. header[] holds the first
 * DHDPCIE_CONFIG_HDR_SIZE (16) dwords, i.e. the standard 64-byte config
 * header; the remaining fields capture individual capability registers
 * (offsets in the trailing comments are relative to each capability).
 */
typedef struct dhdpcie_config_save
{
	uint32 header[DHDPCIE_CONFIG_HDR_SIZE];
	/* pmcsr save */
	uint32 pmcsr;
	/* express save */
	uint32 exp_dev_ctrl_stat;
	uint32 exp_link_ctrl_stat;
	uint32 exp_dev_ctrl_stat2;
	uint32 exp_link_ctrl_stat2;
	/* msi save */
	uint32 msi_cap;
	uint32 msi_addr0;
	uint32 msi_addr1;
	uint32 msi_data;
	/* l1pm save */
	uint32 l1pm0;
	uint32 l1pm1;
	/* ltr save */
	uint32 ltr;
	/* aer save */
	uint32 aer_caps_ctrl; /* 0x18 */
	uint32 aer_severity;  /* 0x0C */
	uint32 aer_umask;     /* 0x08 */
	uint32 aer_cmask;     /* 0x14 */
	uint32 aer_root_cmd;  /* 0x2c */
	/* BAR0 and BAR1 windows */
	uint32 bar0_win;
	uint32 bar1_win;
} dhdpcie_config_save_t;
229 
/* The level of bus communication with the dongle */
enum dhd_bus_low_power_state {
	DHD_BUS_NO_LOW_POWER_STATE,	/* Not in low power state */
	DHD_BUS_D3_INFORM_SENT,		/* D3 INFORM sent */
	DHD_BUS_D3_ACK_RECIEVED,	/* D3 ACK received (enum name keeps the
					 * historical misspelling -- ABI)
					 */
};
236 
237 /** Instantiated once for each hardware (dongle) instance that this DHD manages */
238 typedef struct dhd_bus {
239 	dhd_pub_t	*dhd;	/**< pointer to per hardware (dongle) unique instance */
240 	struct pci_dev  *rc_dev;	/* pci RC device handle */
241 	struct pci_dev  *dev;		/* pci device handle */
242 
243 	dll_t		flowring_active_list; /* constructed list of tx flowring queues */
244 #ifdef IDLE_TX_FLOW_MGMT
245 	uint64		active_list_last_process_ts;
246 						/* stores the timestamp of active list processing */
247 #endif /* IDLE_TX_FLOW_MGMT */
248 
249 	si_t		*sih;			/* Handle for SI calls */
250 	char		*vars;			/* Variables (from CIS and/or other) */
251 	uint		varsz;			/* Size of variables buffer */
252 	uint32		sbaddr;			/* Current SB window pointer (-1, invalid) */
253 	sbpcieregs_t	*reg;			/* Registers for PCIE core */
254 
255 	uint		armrev;			/* CPU core revision */
256 	uint		coreid;			/* CPU core id */
257 	uint		ramrev;			/* SOCRAM core revision */
258 	uint32		ramsize;		/* Size of RAM in SOCRAM (bytes) */
259 	uint32		orig_ramsize;		/* Size of RAM in SOCRAM (bytes) */
260 	bool		ramsize_adjusted;	/* flag to note adjustment, so that
261 						 * adjustment routine and file io
262 						 * are avoided on D3 cold -> D0
263 						 */
264 	uint32		srmemsize;		/* Size of SRMEM */
265 
266 	uint32		bus;			/* gSPI or SDIO bus */
267 	uint32		intstatus;		/* Intstatus bits (events) pending */
268 	bool		dpc_sched;		/* Indicates DPC schedule (intrpt rcvd) */
269 	bool		fcstate;		/* State of dongle flow-control */
270 
271 	uint16		cl_devid;		/* cached devid for dhdsdio_probe_attach() */
272 	char		*fw_path;		/* module_param: path to firmware image */
273 	char		*nv_path;		/* module_param: path to nvram vars file */
274 
275 	struct pktq	txq;			/* Queue length used for flow-control */
276 
277 	bool		intr;			/* Use interrupts */
278 	bool		ipend;			/* Device interrupt is pending */
279 	bool		intdis;			/* Interrupts disabled by isr */
280 	uint		intrcount;		/* Count of device interrupt callbacks */
281 	uint		lastintrs;		/* Count as of last watchdog timer */
282 
283 	dhd_console_t	console;		/* Console output polling support */
284 	uint		console_addr;		/* Console address from shared struct */
285 
286 	bool		alp_only;		/* Don't use HT clock (ALP only) */
287 
288 	bool		remap;		/* Contiguous 1MB RAM: 512K socram + 512K devram
289 					 * Available with socram rev 16
290 					 * Remap region not DMA-able
291 					 */
292 	uint32		resetinstr;
293 	uint32		dongle_ram_base;
294 
295 	ulong		shared_addr;
296 	pciedev_shared_t	*pcie_sh;
297 	uint32		dma_rxoffset;
298 	volatile char	*regs;		/* pci device memory va */
299 	volatile char	*tcm;		/* pci device memory va */
300 	osl_t		*osh;
301 	uint32		nvram_csm;	/* Nvram checksum */
302 	uint16		pollrate;
303 	uint16  polltick;
304 
305 	volatile uint32  *pcie_mb_intr_addr;
306 	volatile uint32  *pcie_mb_intr_2_addr;
307 	void    *pcie_mb_intr_osh;
308 	bool	sleep_allowed;
309 
310 	wake_counts_t	wake_counts;
311 
312 	/* version 3 shared struct related info start */
313 	ring_sh_info_t	ring_sh[BCMPCIE_COMMON_MSGRINGS + MAX_DHD_TX_FLOWS];
314 
315 	uint8	h2d_ring_count;
316 	uint8	d2h_ring_count;
317 	uint32  ringmem_ptr;
318 	uint32  ring_state_ptr;
319 
320 	uint32 d2h_dma_scratch_buffer_mem_addr;
321 
322 	uint32 h2d_mb_data_ptr_addr;
323 	uint32 d2h_mb_data_ptr_addr;
324 	/* version 3 shared struct related info end */
325 
326 	uint32 def_intmask;
327 	uint32 d2h_mb_mask;
328 	uint32 pcie_mailbox_mask;
329 	uint32 pcie_mailbox_int;
330 	bool	ltrsleep_on_unload;
331 	uint	wait_for_d3_ack;
332 	uint16	max_tx_flowrings;
333 	uint16	max_submission_rings;
334 	uint16	max_completion_rings;
335 	uint16	max_cmn_rings;
336 	uint32	rw_index_sz;
337 	bool	db1_for_mb;
338 
339 	dhd_timeout_t doorbell_timer;
340 	bool	device_wake_state;
341 	bool	irq_registered;
342 	bool	d2h_intr_method;
343 #ifdef SUPPORT_LINKDOWN_RECOVERY
344 #if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \
345 	defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895) || \
346 	defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820))
347 #ifdef CONFIG_ARCH_MSM
348 	uint8 no_cfg_restore;
349 #endif /* CONFIG_ARCH_MSM */
350 	struct_pcie_register_event pcie_event;
351 #endif /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY &&
352 	* (CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 ||
353 	* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 ))
354 	*/
355 	bool read_shm_fail;
356 #endif /* SUPPORT_LINKDOWN_RECOVERY */
357 	int32 idletime;                 /* Control for activity timeout */
358 #ifdef DHD_PCIE_RUNTIMEPM
359 	int32 idlecount;                /* Activity timeout counter */
360 	int32 bus_wake;                 /* For wake up the bus */
361 	bool runtime_resume_done;       /* For check runtime suspend end */
362 	struct mutex pm_lock;            /* Synchronize for system PM & runtime PM */
363 	wait_queue_head_t rpm_queue;    /* wait-queue for bus wake up */
364 #endif /* DHD_PCIE_RUNTIMEPM */
365 	uint32 d3_inform_cnt;
366 	uint32 d0_inform_cnt;
367 	uint32 d0_inform_in_use_cnt;
368 	uint8 force_suspend;
369 	uint8 is_linkdown;
370 	uint8 no_bus_init;
371 #ifdef IDLE_TX_FLOW_MGMT
372 	bool enable_idle_flowring_mgmt;
373 #endif /* IDLE_TX_FLOW_MGMT */
374 	struct	dhd_pcie_rev api;
375 	bool use_mailbox;
376 	bool    use_d0_inform;
377 	void	*bus_lock;
378 	void *backplane_access_lock;
379 	enum dhd_bus_low_power_state bus_low_power_state;
380 	uint32  hostready_count; /* Number of hostready issued */
381 #if defined(BCMPCIE_OOB_HOST_WAKE)
382 	bool	oob_presuspend;
383 #endif // endif
384 	dhdpcie_config_save_t saved_config;
385 	ulong resume_intr_enable_count;
386 	ulong dpc_intr_enable_count;
387 	ulong isr_intr_disable_count;
388 	ulong suspend_intr_disable_count;
389 	ulong dpc_return_busdown_count;
390 	ulong non_ours_irq_count;
391 #ifdef BCMPCIE_OOB_HOST_WAKE
392 	ulong oob_intr_count;
393 	ulong oob_intr_enable_count;
394 	ulong oob_intr_disable_count;
395 	uint64 last_oob_irq_time;
396 	uint64 last_oob_irq_enable_time;
397 	uint64 last_oob_irq_disable_time;
398 #endif /* BCMPCIE_OOB_HOST_WAKE */
399 	uint64 isr_entry_time;
400 	uint64 isr_exit_time;
401 	uint64 dpc_sched_time;
402 	uint64 dpc_entry_time;
403 	uint64 dpc_exit_time;
404 	uint64 resched_dpc_time;
405 	uint64 last_d3_inform_time;
406 	uint64 last_process_ctrlbuf_time;
407 	uint64 last_process_flowring_time;
408 	uint64 last_process_txcpl_time;
409 	uint64 last_process_rxcpl_time;
410 	uint64 last_process_infocpl_time;
411 	uint64 last_process_edl_time;
412 	uint64 last_suspend_start_time;
413 	uint64 last_suspend_end_time;
414 	uint64 last_resume_start_time;
415 	uint64 last_resume_end_time;
416 	uint64 last_non_ours_irq_time;
417 	uint8 hwa_enab_bmap;
418 	bool  idma_enabled;
419 	bool  ifrm_enabled;
420 	bool  dar_enabled;
421 	uint32 dmaxfer_complete;
422 	uint8	dw_option;
423 #ifdef DHD_PCIE_RUNTIMEPM
424 	bool chk_pm;	/* To avoid counting of wake up from Runtime PM */
425 #endif /* DHD_PCIE_RUNTIMEPM */
426 	bool _dar_war;
427 	uint8  dma_chan;
428 	bool	cto_enable;	/* enable PCIE CTO Prevention and recovery */
429 	uint32  cto_threshold;  /* PCIE CTO timeout threshold */
430 	bool	cto_triggered;	/* CTO is triggered */
431 	int	pwr_req_ref;
432 	bool flr_force_fail; /* user intends to simulate flr force fail */
433 	bool intr_enabled; /* ready to receive interrupts from dongle */
434 	bool force_bt_quiesce; /* send bt_quiesce command to BT driver. */
435 #if defined(DHD_H2D_LOG_TIME_SYNC)
436 	ulong dhd_rte_time_sync_count; /* OSL_SYSUPTIME_US() */
437 #endif /* DHD_H2D_LOG_TIME_SYNC */
438 	bool rc_ep_aspm_cap; /* RC and EP ASPM capable */
439 	bool rc_ep_l1ss_cap; /* EC and EP L1SS capable */
440 	uint16 hp2p_txcpl_max_items;
441 	uint16 hp2p_rxcpl_max_items;
442 	/* PCIE coherent status */
443 	uint32 coherent_state;
444 } dhd_bus_t;
445 
446 #ifdef DHD_MSI_SUPPORT
447 extern uint enable_msi;
448 #endif /* DHD_MSI_SUPPORT */
449 
/* Interrupt delivery method used by the host (see dhd_bus::d2h_intr_method) */
enum {
	PCIE_INTX = 0,	/* legacy line-based INTx interrupts */
	PCIE_MSI = 1	/* message-signaled interrupts */
};
454 
455 /* function declarations */
456 
457 extern uint32* dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size);
458 extern int dhdpcie_bus_register(void);
459 extern void dhdpcie_bus_unregister(void);
460 extern bool dhdpcie_chipmatch(uint16 vendor, uint16 device);
461 
462 extern int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr,
463 	volatile char *regs, volatile char *tcm, void *pci_dev);
464 extern uint32 dhdpcie_bus_cfg_read_dword(struct dhd_bus *bus, uint32 addr, uint32 size);
465 extern void dhdpcie_bus_cfg_write_dword(struct dhd_bus *bus, uint32 addr, uint32 size, uint32 data);
466 extern void dhdpcie_bus_intr_enable(struct dhd_bus *bus);
467 extern void dhdpcie_bus_intr_disable(struct dhd_bus *bus);
468 extern int dhpcie_bus_mask_interrupt(dhd_bus_t *bus);
469 extern void dhdpcie_bus_release(struct dhd_bus *bus);
470 extern int32 dhdpcie_bus_isr(struct dhd_bus *bus);
471 extern void dhdpcie_free_irq(dhd_bus_t *bus);
472 extern void dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value);
473 extern void dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake);
474 extern void dhdpcie_dongle_reset(dhd_bus_t *bus);
475 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
476 extern int dhdpcie_bus_suspend(struct  dhd_bus *bus, bool state, bool byint);
477 #else
478 extern int dhdpcie_bus_suspend(struct  dhd_bus *bus, bool state);
479 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
480 extern int dhdpcie_pci_suspend_resume(struct  dhd_bus *bus, bool state);
481 extern uint32 dhdpcie_force_alp(struct dhd_bus *bus, bool enable);
482 extern uint32 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int force_l1_entry_time);
483 extern bool dhdpcie_tcm_valid(dhd_bus_t *bus);
484 extern void dhdpcie_pme_active(osl_t *osh, bool enable);
485 extern bool dhdpcie_pme_cap(osl_t *osh);
486 extern uint32 dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val);
487 extern void dhdpcie_set_pmu_min_res_mask(struct dhd_bus *bus, uint min_res_mask);
488 extern uint8 dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val);
489 extern int dhdpcie_disable_irq(dhd_bus_t *bus);
490 extern int dhdpcie_disable_irq_nosync(dhd_bus_t *bus);
491 extern int dhdpcie_enable_irq(dhd_bus_t *bus);
492 
493 extern void dhd_bus_dump_dar_registers(struct dhd_bus *bus);
494 
495 extern uint32 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset);
496 extern uint32 dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
497 		bool is_write, uint32 writeval);
498 extern uint32 dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
499 		bool is_write, uint32 writeval);
500 extern uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus);
501 extern int dhdpcie_start_host_pcieclock(dhd_bus_t *bus);
502 extern int dhdpcie_stop_host_pcieclock(dhd_bus_t *bus);
503 extern int dhdpcie_disable_device(dhd_bus_t *bus);
504 extern int dhdpcie_alloc_resource(dhd_bus_t *bus);
505 extern void dhdpcie_free_resource(dhd_bus_t *bus);
506 extern void dhdpcie_dump_resource(dhd_bus_t *bus);
507 extern int dhdpcie_bus_request_irq(struct dhd_bus *bus);
508 void dhdpcie_os_setbar1win(dhd_bus_t *bus, uint32 addr);
509 void dhdpcie_os_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
510 uint8 dhdpcie_os_rtcm8(dhd_bus_t *bus, ulong offset);
511 void dhdpcie_os_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
512 uint16 dhdpcie_os_rtcm16(dhd_bus_t *bus, ulong offset);
513 void dhdpcie_os_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
514 uint32 dhdpcie_os_rtcm32(dhd_bus_t *bus, ulong offset);
515 #ifdef DHD_SUPPORT_64BIT
516 void dhdpcie_os_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
517 uint64 dhdpcie_os_rtcm64(dhd_bus_t *bus, ulong offset);
518 #endif // endif
519 
520 extern int dhdpcie_enable_device(dhd_bus_t *bus);
521 
522 #ifdef BCMPCIE_OOB_HOST_WAKE
523 extern int dhdpcie_oob_intr_register(dhd_bus_t *bus);
524 extern void dhdpcie_oob_intr_unregister(dhd_bus_t *bus);
525 extern void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable);
526 extern int dhdpcie_get_oob_irq_num(struct dhd_bus *bus);
527 extern int dhdpcie_get_oob_irq_status(struct dhd_bus *bus);
528 extern int dhdpcie_get_oob_irq_level(void);
529 #endif /* BCMPCIE_OOB_HOST_WAKE */
530 
531 #if defined(CONFIG_ARCH_EXYNOS)
532 #define SAMSUNG_PCIE_VENDOR_ID 0x144d
533 #if defined(CONFIG_MACH_UNIVERSAL5433)
534 #define SAMSUNG_PCIE_DEVICE_ID 0xa5e3
535 #define SAMSUNG_PCIE_CH_NUM
536 #elif defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS7420)
537 #define SAMSUNG_PCIE_DEVICE_ID 0xa575
538 #define SAMSUNG_PCIE_CH_NUM 1
539 #elif defined(CONFIG_SOC_EXYNOS8890)
540 #define SAMSUNG_PCIE_DEVICE_ID 0xa544
541 #define SAMSUNG_PCIE_CH_NUM 0
542 #elif defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
543 	defined(CONFIG_SOC_EXYNOS9820)
544 #define SAMSUNG_PCIE_DEVICE_ID 0xecec
545 #define SAMSUNG_PCIE_CH_NUM 0
546 #else
547 #error "Not supported platform"
548 #endif /* CONFIG_SOC_EXYNOSXXXX & CONFIG_MACH_UNIVERSALXXXX */
549 #endif /* CONFIG_ARCH_EXYNOS */
550 
551 #if defined(CONFIG_ARCH_MSM)
552 #define MSM_PCIE_VENDOR_ID 0x17cb
553 #if defined(CONFIG_ARCH_APQ8084)
554 #define MSM_PCIE_DEVICE_ID 0x0101
555 #elif defined(CONFIG_ARCH_MSM8994)
556 #define MSM_PCIE_DEVICE_ID 0x0300
557 #elif defined(CONFIG_ARCH_MSM8996)
558 #define MSM_PCIE_DEVICE_ID 0x0104
559 #elif defined(CONFIG_ARCH_MSM8998)
560 #define MSM_PCIE_DEVICE_ID 0x0105
561 #elif defined(CONFIG_ARCH_SDM845) || defined(CONFIG_ARCH_SM8150)
562 #define MSM_PCIE_DEVICE_ID 0x0106
563 #elif defined(USE_CUSTOM_MSM_PCIE)
564 #define MSM_PCIE_DEVICE_ID MSM_PCIE_CUSTOM_DEVICE_ID
565 #else
566 #error "Not supported platform"
567 #endif // endif
568 #endif /* CONFIG_ARCH_MSM */
569 
570 #if defined(CONFIG_X86)
571 #define X86_PCIE_VENDOR_ID 0x8086
572 #define X86_PCIE_DEVICE_ID 0x9c1a
573 #endif /* CONFIG_X86 */
574 
575 #if defined(CONFIG_ARCH_TEGRA)
576 #define TEGRA_PCIE_VENDOR_ID 0x14e4
577 #define TEGRA_PCIE_DEVICE_ID 0x4347
578 #endif /* CONFIG_ARCH_TEGRA */
579 
580 #if defined(BOARD_HIKEY)
581 #define HIKEY_PCIE_VENDOR_ID 0x19e5
582 #define HIKEY_PCIE_DEVICE_ID 0x3660
583 #endif /* BOARD_HIKEY */
584 
585 #define DUMMY_PCIE_VENDOR_ID 0xffff
586 #define DUMMY_PCIE_DEVICE_ID 0xffff
587 
588 #if defined(CONFIG_ARCH_EXYNOS)
589 #define PCIE_RC_VENDOR_ID SAMSUNG_PCIE_VENDOR_ID
590 #define PCIE_RC_DEVICE_ID SAMSUNG_PCIE_DEVICE_ID
591 #elif defined(CONFIG_ARCH_MSM)
592 #define PCIE_RC_VENDOR_ID MSM_PCIE_VENDOR_ID
593 #define PCIE_RC_DEVICE_ID MSM_PCIE_DEVICE_ID
594 #elif defined(CONFIG_X86)
595 #define PCIE_RC_VENDOR_ID X86_PCIE_VENDOR_ID
596 #define PCIE_RC_DEVICE_ID X86_PCIE_DEVICE_ID
597 #elif defined(CONFIG_ARCH_TEGRA)
598 #define PCIE_RC_VENDOR_ID TEGRA_PCIE_VENDOR_ID
599 #define PCIE_RC_DEVICE_ID TEGRA_PCIE_DEVICE_ID
600 #elif defined(BOARD_HIKEY)
601 #define PCIE_RC_VENDOR_ID HIKEY_PCIE_VENDOR_ID
602 #define PCIE_RC_DEVICE_ID HIKEY_PCIE_DEVICE_ID
603 #else
604 /* Use dummy vendor and device IDs */
605 #define PCIE_RC_VENDOR_ID DUMMY_PCIE_VENDOR_ID
606 #define PCIE_RC_DEVICE_ID DUMMY_PCIE_DEVICE_ID
607 #endif /* CONFIG_ARCH_EXYNOS */
608 
609 #define DHD_REGULAR_RING    0
610 #define DHD_HP2P_RING    1
611 
612 #ifdef USE_EXYNOS_PCIE_RC_PMPATCH
613 #ifdef CONFIG_MACH_UNIVERSAL5433
614 extern int exynos_pcie_pm_suspend(void);
615 extern int exynos_pcie_pm_resume(void);
616 #else
617 extern int exynos_pcie_pm_suspend(int ch_num);
618 extern int exynos_pcie_pm_resume(int ch_num);
619 #endif /* CONFIG_MACH_UNIVERSAL5433 */
620 #endif /* USE_EXYNOS_PCIE_RC_PMPATCH */
621 
622 #ifdef CONFIG_ARCH_TEGRA
623 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0))
624 extern int tegra_pcie_pm_suspend(void);
625 extern int tegra_pcie_pm_resume(void);
626 #endif // endif
627 #endif /* CONFIG_ARCH_TEGRA */
628 
629 extern int dhd_buzzz_dump_dngl(dhd_bus_t *bus);
630 #ifdef IDLE_TX_FLOW_MGMT
631 extern int dhd_bus_flow_ring_resume_request(struct dhd_bus *bus, void *arg);
632 extern void dhd_bus_flow_ring_resume_response(struct dhd_bus *bus, uint16 flowid, int32 status);
633 extern int dhd_bus_flow_ring_suspend_request(struct dhd_bus *bus, void *arg);
634 extern void dhd_bus_flow_ring_suspend_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
635 extern void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus,
636 	flow_ring_node_t *flow_ring_node);
637 extern void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus,
638 	flow_ring_node_t *flow_ring_node);
639 extern void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus,
640 	flow_ring_node_t *flow_ring_node);
641 extern void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus,
642 	flow_ring_node_t *flow_ring_node);
643 #endif /* IDLE_TX_FLOW_MGMT */
644 
645 extern int dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data);
646 
647 #ifdef DHD_WAKE_STATUS
648 int bcmpcie_get_total_wake(struct dhd_bus *bus);
649 int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag);
650 #endif /* DHD_WAKE_STATUS */
651 extern bool dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus);
652 extern void dhd_bus_hostready(struct  dhd_bus *bus);
653 extern void dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option);
654 extern int dhdpcie_irq_disabled(struct dhd_bus *bus);
655 
/* Stub: unconditionally reports the dongle ARM as halted (no real check). */
static INLINE bool dhdpcie_is_arm_halted(struct dhd_bus *bus) {return TRUE;}
/* Stub: no platform power hook wired in; reports success without acting. */
static INLINE int dhd_os_wifi_platform_set_power(uint32 value) {return BCME_OK; }
/* Stub: dongle FLR / power-toggle is a no-op in this build. */
static INLINE void
dhdpcie_dongle_flr_or_pwr_toggle(dhd_bus_t *bus)
{ return; }
661 
662 int dhdpcie_config_check(dhd_bus_t *bus);
663 int dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr);
664 int dhdpcie_config_save(dhd_bus_t *bus);
665 int dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state);
666 
667 extern bool dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus);
668 extern bool dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus);
669 extern bool dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus);
670 extern bool dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus);
671 
/* Read 'size' bytes from PCI config space at 'offset'.
 * The unconditional 100us delay before the access looks like a settling
 * workaround; NOTE(review): rationale is not documented here -- confirm
 * against the callers in dhd_pcie.c before changing it.
 */
static INLINE uint32
dhd_pcie_config_read(osl_t *osh, uint offset, uint size)
{
	OSL_DELAY(100);
	return OSL_PCI_READ_CONFIG(osh, offset, size);
}
678 
/* Indirect PCIe core register read: program the core's configaddr register
 * with 'val', then read the result back through configdata. The 100us
 * delay mirrors dhd_pcie_config_read() (settling workaround; rationale
 * not documented here).
 */
static INLINE uint32
dhd_pcie_corereg_read(si_t *sih, uint val)
{
	OSL_DELAY(100);
	si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, val);
	return si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), 0, 0);
}
686 
687 extern int dhdpcie_get_fwpath_otp(dhd_bus_t *bus, char *fw_path, char *nv_path,
688 		char *clm_path, char *txcap_path);
689 
690 extern int dhd_pcie_debug_info_dump(dhd_pub_t *dhd);
691 extern void dhd_pcie_intr_count_dump(dhd_pub_t *dhd);
692 extern void dhdpcie_bus_clear_intstatus(dhd_bus_t *bus);
693 #ifdef DHD_HP2P
694 extern uint16 dhd_bus_get_hp2p_ring_max_size(dhd_bus_t *bus, bool tx);
695 #endif // endif
696 
697 #endif /* dhd_pcie_h */
698