1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Linux DHD Bus Module for PCIE
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Copyright (C) 2020, Broadcom.
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * Unless you and Broadcom execute a separate written software license
7*4882a593Smuzhiyun * agreement governing use of this software, this software is licensed to you
8*4882a593Smuzhiyun * under the terms of the GNU General Public License version 2 (the "GPL"),
9*4882a593Smuzhiyun * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10*4882a593Smuzhiyun * following added to such license:
11*4882a593Smuzhiyun *
12*4882a593Smuzhiyun * As a special exception, the copyright holders of this software give you
13*4882a593Smuzhiyun * permission to link this software with independent modules, and to copy and
14*4882a593Smuzhiyun * distribute the resulting executable under terms of your choice, provided that
15*4882a593Smuzhiyun * you also meet, for each linked independent module, the terms and conditions of
16*4882a593Smuzhiyun * the license of that module. An independent module is a module which is not
17*4882a593Smuzhiyun * derived from this software. The special exception does not apply to any
18*4882a593Smuzhiyun * modifications of the software.
19*4882a593Smuzhiyun *
20*4882a593Smuzhiyun *
21*4882a593Smuzhiyun * <<Broadcom-WL-IPTag/Open:>>
22*4882a593Smuzhiyun *
23*4882a593Smuzhiyun * $Id$
24*4882a593Smuzhiyun */
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun #ifndef dhd_pcie_h
27*4882a593Smuzhiyun #define dhd_pcie_h
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun #include <bcmpcie.h>
30*4882a593Smuzhiyun #include <hnd_cons.h>
31*4882a593Smuzhiyun #include <dhd_linux.h>
32*4882a593Smuzhiyun #ifdef SUPPORT_LINKDOWN_RECOVERY
33*4882a593Smuzhiyun #ifdef CONFIG_ARCH_MSM
34*4882a593Smuzhiyun #ifdef CONFIG_PCI_MSM
35*4882a593Smuzhiyun #include <linux/msm_pcie.h>
36*4882a593Smuzhiyun #else
37*4882a593Smuzhiyun #include <mach/msm_pcie.h>
38*4882a593Smuzhiyun #endif /* CONFIG_PCI_MSM */
39*4882a593Smuzhiyun #endif /* CONFIG_ARCH_MSM */
40*4882a593Smuzhiyun #ifdef CONFIG_ARCH_EXYNOS
41*4882a593Smuzhiyun #ifndef SUPPORT_EXYNOS7420
42*4882a593Smuzhiyun #include <linux/exynos-pci-noti.h>
43*4882a593Smuzhiyun extern int exynos_pcie_register_event(struct exynos_pcie_register_event *reg);
44*4882a593Smuzhiyun extern int exynos_pcie_deregister_event(struct exynos_pcie_register_event *reg);
45*4882a593Smuzhiyun #endif /* !SUPPORT_EXYNOS7420 */
46*4882a593Smuzhiyun #endif /* CONFIG_ARCH_EXYNOS */
47*4882a593Smuzhiyun #endif /* SUPPORT_LINKDOWN_RECOVERY */
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun #ifdef DHD_PCIE_RUNTIMEPM
50*4882a593Smuzhiyun #include <linux/mutex.h>
51*4882a593Smuzhiyun #include <linux/wait.h>
52*4882a593Smuzhiyun #endif /* DHD_PCIE_RUNTIMEPM */
53*4882a593Smuzhiyun
54*4882a593Smuzhiyun /* defines */
55*4882a593Smuzhiyun #define PCIE_SHARED_VERSION PCIE_SHARED_VERSION_7
56*4882a593Smuzhiyun
57*4882a593Smuzhiyun #define PCMSGBUF_HDRLEN 0
58*4882a593Smuzhiyun #define DONGLE_REG_MAP_SIZE (32 * 1024)
59*4882a593Smuzhiyun #define DONGLE_TCM_MAP_SIZE (4096 * 1024)
60*4882a593Smuzhiyun #define DONGLE_MIN_MEMSIZE (128 *1024)
61*4882a593Smuzhiyun #ifdef DHD_DEBUG
62*4882a593Smuzhiyun #define DHD_PCIE_SUCCESS 0
63*4882a593Smuzhiyun #define DHD_PCIE_FAILURE 1
64*4882a593Smuzhiyun #endif /* DHD_DEBUG */
65*4882a593Smuzhiyun #define REMAP_ENAB(bus) ((bus)->remap)
66*4882a593Smuzhiyun #define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun #ifdef SUPPORT_LINKDOWN_RECOVERY
69*4882a593Smuzhiyun #ifdef CONFIG_ARCH_MSM
70*4882a593Smuzhiyun #define struct_pcie_notify struct msm_pcie_notify
71*4882a593Smuzhiyun #define struct_pcie_register_event struct msm_pcie_register_event
72*4882a593Smuzhiyun #endif /* CONFIG_ARCH_MSM */
73*4882a593Smuzhiyun #ifdef CONFIG_ARCH_EXYNOS
74*4882a593Smuzhiyun #ifndef SUPPORT_EXYNOS7420
75*4882a593Smuzhiyun #define struct_pcie_notify struct exynos_pcie_notify
76*4882a593Smuzhiyun #define struct_pcie_register_event struct exynos_pcie_register_event
77*4882a593Smuzhiyun #endif /* !SUPPORT_EXYNOS7420 */
78*4882a593Smuzhiyun #endif /* CONFIG_ARCH_EXYNOS */
79*4882a593Smuzhiyun #endif /* SUPPORT_LINKDOWN_RECOVERY */
80*4882a593Smuzhiyun
81*4882a593Smuzhiyun #define MAX_DHD_TX_FLOWS 320
82*4882a593Smuzhiyun
83*4882a593Smuzhiyun /* user defined data structures */
84*4882a593Smuzhiyun /* Device console log buffer state */
85*4882a593Smuzhiyun #define CONSOLE_LINE_MAX 192u
86*4882a593Smuzhiyun #define CONSOLE_BUFFER_MAX (8 * 1024)
87*4882a593Smuzhiyun
88*4882a593Smuzhiyun #ifdef IDLE_TX_FLOW_MGMT
89*4882a593Smuzhiyun #define IDLE_FLOW_LIST_TIMEOUT 5000
90*4882a593Smuzhiyun #define IDLE_FLOW_RING_TIMEOUT 5000
91*4882a593Smuzhiyun #endif /* IDLE_TX_FLOW_MGMT */
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun #ifdef DEVICE_TX_STUCK_DETECT
94*4882a593Smuzhiyun #define DEVICE_TX_STUCK_CKECK_TIMEOUT 1000 /* 1 sec */
95*4882a593Smuzhiyun #define DEVICE_TX_STUCK_TIMEOUT 10000 /* 10 secs */
96*4882a593Smuzhiyun #define DEVICE_TX_STUCK_WARN_DURATION (DEVICE_TX_STUCK_TIMEOUT / DEVICE_TX_STUCK_CKECK_TIMEOUT)
97*4882a593Smuzhiyun #define DEVICE_TX_STUCK_DURATION (DEVICE_TX_STUCK_WARN_DURATION * 2)
98*4882a593Smuzhiyun #endif /* DEVICE_TX_STUCK_DETECT */
99*4882a593Smuzhiyun
/* implicit DMA for h2d wr and d2h rd indices from Host memory to TCM */
101*4882a593Smuzhiyun #define IDMA_ENAB(dhd) ((dhd) && (dhd)->idma_enable)
102*4882a593Smuzhiyun #define IDMA_ACTIVE(dhd) ((dhd) && ((dhd)->idma_enable) && ((dhd)->idma_inited))
103*4882a593Smuzhiyun
104*4882a593Smuzhiyun #define IDMA_CAPABLE(bus) (((bus)->sih->buscorerev == 19) || ((bus)->sih->buscorerev >= 23))
105*4882a593Smuzhiyun
/* IFRM (Implicit Flow Ring Manager) enable and inited */
107*4882a593Smuzhiyun #define IFRM_ENAB(dhd) ((dhd) && (dhd)->ifrm_enable)
108*4882a593Smuzhiyun #define IFRM_ACTIVE(dhd) ((dhd) && ((dhd)->ifrm_enable) && ((dhd)->ifrm_inited))
109*4882a593Smuzhiyun
/* DAR registers used for h2d doorbell */
111*4882a593Smuzhiyun #define DAR_ENAB(dhd) ((dhd) && (dhd)->dar_enable)
112*4882a593Smuzhiyun #define DAR_ACTIVE(dhd) ((dhd) && ((dhd)->dar_enable) && ((dhd)->dar_inited))
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun /* DAR WAR for revs < 64 */
115*4882a593Smuzhiyun #define DAR_PWRREQ(bus) (((bus)->_dar_war) && DAR_ACTIVE((bus)->dhd))
116*4882a593Smuzhiyun
117*4882a593Smuzhiyun /* PCIE CTO Prevention and Recovery */
118*4882a593Smuzhiyun #define PCIECTO_ENAB(bus) ((bus)->cto_enable)
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun /* Implicit DMA index usage :
121*4882a593Smuzhiyun * Index 0 for h2d write index transfer
122*4882a593Smuzhiyun * Index 1 for d2h read index transfer
123*4882a593Smuzhiyun */
124*4882a593Smuzhiyun #define IDMA_IDX0 0
125*4882a593Smuzhiyun #define IDMA_IDX1 1
126*4882a593Smuzhiyun #define IDMA_IDX2 2
127*4882a593Smuzhiyun #define IDMA_IDX3 3
128*4882a593Smuzhiyun #define DMA_TYPE_SHIFT 4
129*4882a593Smuzhiyun #define DMA_TYPE_IDMA 1
130*4882a593Smuzhiyun
131*4882a593Smuzhiyun #define DHDPCIE_CONFIG_HDR_SIZE 16
132*4882a593Smuzhiyun #define DHDPCIE_CONFIG_CHECK_DELAY_MS 10 /* 10ms */
133*4882a593Smuzhiyun #define DHDPCIE_CONFIG_CHECK_RETRY_COUNT 20
134*4882a593Smuzhiyun #define DHDPCIE_DONGLE_PWR_TOGGLE_DELAY 1000 /* 1ms in units of us */
135*4882a593Smuzhiyun #define DHDPCIE_PM_D3_DELAY 200000 /* 200ms in units of us */
136*4882a593Smuzhiyun #define DHDPCIE_PM_D2_DELAY 200 /* 200us */
137*4882a593Smuzhiyun
/* Host-side state for polling the dongle console log buffer */
typedef struct dhd_console {
	uint count;	/* Poll interval msec counter */
	uint log_addr;	/* Log struct address (fixed) */
	hnd_log_t log;	/* Log struct (host copy) */
	uint bufsize;	/* Size of log buffer */
	uint8 *buf;	/* Log buffer (host copy) */
	uint last;	/* Last buffer read index */
} dhd_console_t;
146*4882a593Smuzhiyun
/* Per-ring shared-memory bookkeeping: dongle-side addresses for the ring
 * memory and its read/write state words, plus the HWA doorbell index.
 */
typedef struct ring_sh_info {
	uint32 ring_mem_addr;	/* address of the ring memory */
	uint32 ring_state_w;	/* address of the ring write state */
	uint32 ring_state_r;	/* address of the ring read state */
	pcie_hwa_db_index_t ring_hwa_db_idx; /* HWA DB index value per ring */
} ring_sh_info_t;
153*4882a593Smuzhiyun #define MAX_DS_TRACE_SIZE 50
154*4882a593Smuzhiyun #ifdef DHD_MMIO_TRACE
155*4882a593Smuzhiyun #define MAX_MMIO_TRACE_SIZE 256
/* At least 250us must elapse before a new entry is added */
157*4882a593Smuzhiyun #define MIN_MMIO_TRACE_TIME 250
158*4882a593Smuzhiyun #define DHD_RING_IDX 0x00FF0000
/* One MMIO access trace record (stored in bus->mmio_trace) */
typedef struct _dhd_mmio_trace_t {
	uint64 timestamp;	/* time the access was recorded */
	uint32 addr;		/* register address/offset accessed */
	uint32 value;		/* value involved in the access */
	bool set;		/* presumably TRUE for a write access -- confirm against tracer */
} dhd_mmio_trace_t;
165*4882a593Smuzhiyun #endif /* defined(DHD_MMIO_TRACE) */
/* One deep-sleep (DS) protocol trace record (stored in bus->ds_trace) */
typedef struct _dhd_ds_trace_t {
	uint64 timestamp;	/* time the event was recorded */
	bool d2h;		/* presumably TRUE for device-to-host events -- confirm */
	uint32 dsval;		/* raw DS value recorded for the event */
#ifdef PCIE_INB_DW
	enum dhd_bus_ds_state inbstate;	/* inband device-wake state at capture time */
#endif /* PCIE_INB_DW */
} dhd_ds_trace_t;
174*4882a593Smuzhiyun
175*4882a593Smuzhiyun #define DEVICE_WAKE_NONE 0
176*4882a593Smuzhiyun #define DEVICE_WAKE_OOB 1
177*4882a593Smuzhiyun #define DEVICE_WAKE_INB 2
178*4882a593Smuzhiyun
179*4882a593Smuzhiyun #define INBAND_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_INB)
180*4882a593Smuzhiyun #define OOB_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_OOB)
181*4882a593Smuzhiyun #define NO_DW_ENAB(bus) ((bus)->dw_option == DEVICE_WAKE_NONE)
182*4882a593Smuzhiyun
183*4882a593Smuzhiyun #define PCIE_PWR_REQ_RELOAD_WAR_ENAB(buscorerev) \
184*4882a593Smuzhiyun ((buscorerev == 66) || (buscorerev == 67) || (buscorerev == 68) || \
185*4882a593Smuzhiyun (buscorerev == 70) || (buscorerev == 72))
186*4882a593Smuzhiyun
187*4882a593Smuzhiyun #define PCIE_FASTLPO_ENABLED(buscorerev) \
188*4882a593Smuzhiyun ((buscorerev == 66) || (buscorerev == 67) || (buscorerev == 68) || \
189*4882a593Smuzhiyun (buscorerev == 70) || (buscorerev == 72))
190*4882a593Smuzhiyun
191*4882a593Smuzhiyun /*
192*4882a593Smuzhiyun * HW JIRA - CRWLPCIEGEN2-672
193*4882a593Smuzhiyun * Producer Index Feature which is used by F1 gets reset on F0 FLR
194*4882a593Smuzhiyun * fixed in REV68
195*4882a593Smuzhiyun */
196*4882a593Smuzhiyun #define PCIE_ENUM_RESET_WAR_ENAB(buscorerev) \
197*4882a593Smuzhiyun ((buscorerev == 66) || (buscorerev == 67))
198*4882a593Smuzhiyun
199*4882a593Smuzhiyun struct dhd_bus;
200*4882a593Smuzhiyun
/* Firmware-revision-dependent PCIe API dispatch */
struct dhd_pcie_rev {
	uint8 fw_rev;				/* firmware revision */
	void (*handle_mb_data)(struct dhd_bus *);	/* mailbox-data handler for this revision */
};
205*4882a593Smuzhiyun
/* Snapshot of PCIe configuration-space registers, captured so the device
 * configuration can be restored (e.g. after link down or FLR -- see users
 * of bus->saved_config).
 */
typedef struct dhdpcie_config_save
{
	uint32 header[DHDPCIE_CONFIG_HDR_SIZE];	/* standard config header (16 dwords) */
	/* pmcsr save */
	uint32 pmcsr;
	/* express save */
	uint32 exp_dev_ctrl_stat;
	uint32 exp_link_ctrl_stat;
	uint32 exp_dev_ctrl_stat2;
	uint32 exp_link_ctrl_stat2;
	/* msi save */
	uint32 msi_cap;
	uint32 msi_addr0;
	uint32 msi_addr1;
	uint32 msi_data;
	/* l1pm save */
	uint32 l1pm0;
	uint32 l1pm1;
	/* ltr save */
	uint32 ltr;
	/* aer save (offsets below are within the AER extended capability) */
	uint32 aer_caps_ctrl;	/* 0x18 */
	uint32 aer_severity;	/* 0x0C */
	uint32 aer_umask;	/* 0x08 */
	uint32 aer_cmask;	/* 0x14 */
	uint32 aer_root_cmd;	/* 0x2c */
	/* BAR0 and BAR1 windows */
	uint32 bar0_win;
	uint32 bar1_win;
} dhdpcie_config_save_t;
236*4882a593Smuzhiyun
237*4882a593Smuzhiyun /* The level of bus communication with the dongle */
enum dhd_bus_low_power_state {
	DHD_BUS_NO_LOW_POWER_STATE,	/* Not in low power state */
	DHD_BUS_D3_INFORM_SENT,		/* D3 INFORM sent */
	DHD_BUS_D3_ACK_RECIEVED,	/* D3 ACK received (enum name keeps historical misspelling) */
};
243*4882a593Smuzhiyun
244*4882a593Smuzhiyun #ifdef DHD_FLOW_RING_STATUS_TRACE
245*4882a593Smuzhiyun #define FRS_TRACE_SIZE 32 /* frs - flow_ring_status */
/* One flow-ring status snapshot taken from ISR/DPC context.
 * The drd/dwr suffixes appear to be the per-ring read/write indices at
 * capture time -- confirm against the code that fills these records.
 */
typedef struct _dhd_flow_ring_status_trace_t {
	uint64 timestamp;
	uint16 h2d_ctrl_post_drd;
	uint16 h2d_ctrl_post_dwr;
	uint16 d2h_ctrl_cpln_drd;
	uint16 d2h_ctrl_cpln_dwr;
	uint16 h2d_rx_post_drd;
	uint16 h2d_rx_post_dwr;
	uint16 d2h_rx_cpln_drd;
	uint16 d2h_rx_cpln_dwr;
	uint16 d2h_tx_cpln_drd;
	uint16 d2h_tx_cpln_dwr;
	uint16 h2d_info_post_drd;
	uint16 h2d_info_post_dwr;
	uint16 d2h_info_cpln_drd;
	uint16 d2h_info_cpln_dwr;
	uint16 d2h_ring_edl_drd;
	uint16 d2h_ring_edl_dwr;
} dhd_frs_trace_t;
265*4882a593Smuzhiyun #endif /* DHD_FLOW_RING_STATUS_TRACE */
266*4882a593Smuzhiyun
267*4882a593Smuzhiyun /** Instantiated once for each hardware (dongle) instance that this DHD manages */
typedef struct dhd_bus {
	dhd_pub_t *dhd;	/**< pointer to per hardware (dongle) unique instance */
#if !defined(NDIS)
	struct pci_dev *rc_dev;	/* pci RC device handle */
	struct pci_dev *dev;	/* pci device handle */
#endif /* !defined(NDIS) */
#ifdef DHD_EFI
	void *pcie_dev;
#endif
	dll_t flowring_active_list; /* constructed list of tx flowring queues */
#ifdef IDLE_TX_FLOW_MGMT
	uint64 active_list_last_process_ts;
				/* stores the timestamp of active list processing */
#endif /* IDLE_TX_FLOW_MGMT */

#ifdef DEVICE_TX_STUCK_DETECT
	/* Flag to enable/disable device tx stuck monitor by DHD IOVAR dev_tx_stuck_monitor */
	uint32 dev_tx_stuck_monitor;
	/* Stores the timestamp (msec) of the last device Tx stuck check */
	uint32 device_tx_stuck_check;
#endif /* DEVICE_TX_STUCK_DETECT */

	si_t *sih;	/* Handle for SI calls */
	char *vars;	/* Variables (from CIS and/or other) */
	uint varsz;	/* Size of variables buffer */
	uint32 sbaddr;	/* Current SB window pointer (-1, invalid) */
	sbpcieregs_t *reg;	/* Registers for PCIE core */

	uint armrev;	/* CPU core revision */
	uint coreid;	/* CPU core id */
	uint ramrev;	/* SOCRAM core revision */
	uint32 ramsize;	/* Size of RAM in SOCRAM (bytes) */
	uint32 orig_ramsize;	/* Size of RAM in SOCRAM (bytes) */
	uint32 srmemsize;	/* Size of SRMEM */

	uint32 bus;	/* gSPI or SDIO bus */
	uint32 bus_num;	/* bus number */
	uint32 slot_num;	/* slot ID */
	uint32 intstatus;	/* Intstatus bits (events) pending */
	bool dpc_sched;	/* Indicates DPC schedule (intrpt rcvd) */
	bool fcstate;	/* State of dongle flow-control */

	uint16 cl_devid;	/* cached devid for dhdsdio_probe_attach() */
	char *fw_path;	/* module_param: path to firmware image */
	char *nv_path;	/* module_param: path to nvram vars file */
#ifdef CACHE_FW_IMAGES
	int processed_nvram_params_len;	/* Modified len of NVRAM info */
#endif

#ifdef BCM_ROUTER_DHD
	char *nvram_params;	/* user specified nvram params. */
	int nvram_params_len;
#endif /* BCM_ROUTER_DHD */

	struct pktq txq;	/* Queue length used for flow-control */

	bool intr;	/* Use interrupts */
	bool poll;	/* Use polling */
	bool ipend;	/* Device interrupt is pending */
	bool intdis;	/* Interrupts disabled by isr */
	uint intrcount;	/* Count of device interrupt callbacks */
	uint lastintrs;	/* Count as of last watchdog timer */

	dhd_console_t console;	/* Console output polling support */
	uint console_addr;	/* Console address from shared struct */

	bool alp_only;	/* Don't use HT clock (ALP only) */

	bool remap;	/* Contiguous 1MB RAM: 512K socram + 512K devram
			 * Available with socram rev 16
			 * Remap region not DMA-able
			 */
	uint32 resetinstr;	/* presumably the reset instruction written to dongle RAM -- confirm */
	uint32 dongle_ram_base;	/* dongle RAM base address */
	uint32 next_tlv;	/* Holds location of next available TLV */
	ulong shared_addr;
	pciedev_shared_t *pcie_sh;
	uint32 dma_rxoffset;
	volatile char *regs;	/* pci device memory va */
	volatile char *tcm;	/* pci device memory va */
	uint32 bar1_size;	/* pci device memory size */
	uint32 curr_bar1_win;	/* current PCIEBar1Window setting */
	osl_t *osh;	/* OSL handle */
	uint32 nvram_csm;	/* Nvram checksum */
#ifdef BCMINTERNAL
	bool msi_sim;
	uchar *msi_sim_addr;
	dmaaddr_t msi_sim_phys;
	dhd_dma_buf_t hostfw_buf;	/* Host offload firmware buffer */
	uint32 hostfw_base;	/* FW assumed base of host offload mem */
	uint32 bp_base;	/* adjusted bp base of host offload mem */
#endif /* BCMINTERNAL */
	uint16 pollrate;
	uint16 polltick;

	volatile uint32 *pcie_mb_intr_addr;
	volatile uint32 *pcie_mb_intr_2_addr;
	void *pcie_mb_intr_osh;
	bool sleep_allowed;

	wake_counts_t wake_counts;

	/* version 3 shared struct related info start */
	ring_sh_info_t ring_sh[BCMPCIE_COMMON_MSGRINGS + MAX_DHD_TX_FLOWS];

	uint8 h2d_ring_count;
	uint8 d2h_ring_count;
	uint32 ringmem_ptr;
	uint32 ring_state_ptr;

	uint32 d2h_dma_scratch_buffer_mem_addr;

	uint32 h2d_mb_data_ptr_addr;
	uint32 d2h_mb_data_ptr_addr;
	/* version 3 shared struct related info end */

	uint32 def_intmask;
	uint32 d2h_mb_mask;
	uint32 pcie_mailbox_mask;
	uint32 pcie_mailbox_int;
	bool ltrsleep_on_unload;
	uint wait_for_d3_ack;
	uint16 max_tx_flowrings;
	uint16 max_submission_rings;
	uint16 max_completion_rings;
	uint16 max_cmn_rings;
	uint32 rw_index_sz;
	uint32 hwa_db_index_sz;
	bool db1_for_mb;

	dhd_timeout_t doorbell_timer;
	bool device_wake_state;
#ifdef PCIE_OOB
	bool oob_enabled;
#endif /* PCIE_OOB */
	bool irq_registered;
	bool d2h_intr_method;	/* presumably PCIE_INTX/PCIE_MSI (see enum below) -- confirm */
	bool d2h_intr_control;	/* presumably PCIE_D2H_INTMASK_CTRL/PCIE_HOST_IRQ_CTRL -- confirm */
#ifdef SUPPORT_LINKDOWN_RECOVERY
#if defined(CONFIG_ARCH_MSM) || (defined(CONFIG_ARCH_EXYNOS) && \
	!defined(SUPPORT_EXYNOS7420))
#ifdef CONFIG_ARCH_MSM
	uint8 no_cfg_restore;
#endif /* CONFIG_ARCH_MSM */
	struct_pcie_register_event pcie_event;
#endif /* CONFIG_ARCH_MSM || CONFIG_ARCH_EXYNOS && !SUPPORT_EXYNOS7420 */
	bool read_shm_fail;
#endif /* SUPPORT_LINKDOWN_RECOVERY */
	int32 idletime;	/* Control for activity timeout */
	bool rpm_enabled;
#ifdef DHD_PCIE_RUNTIMEPM
	int32 idlecount;	/* Activity timeout counter */
	int32 bus_wake;	/* For wake up the bus */
	bool runtime_resume_done;	/* For check runtime suspend end */
	struct mutex pm_lock;	/* Synchronize for system PM & runtime PM */
	wait_queue_head_t rpm_queue;	/* wait-queue for bus wake up */
#endif /* DHD_PCIE_RUNTIMEPM */
	uint32 d3_inform_cnt;
	uint32 d0_inform_cnt;
	uint32 d0_inform_in_use_cnt;
	uint8 force_suspend;
	uint8 is_linkdown;
	uint8 no_bus_init;
#ifdef IDLE_TX_FLOW_MGMT
	bool enable_idle_flowring_mgmt;
#endif /* IDLE_TX_FLOW_MGMT */
	struct dhd_pcie_rev api;	/* per-FW-revision dispatch */
	bool use_mailbox;
	bool use_d0_inform;
	void *bus_lp_state_lock;	/* guards bus_low_power_state (see dhd_check_bus_in_lps) */
	void *pwr_req_lock;
	bool dongle_in_deepsleep;
	void *dongle_ds_lock;
	bool bar1_switch_enab;
	void *bar1_switch_lock;
	void *backplane_access_lock;
	enum dhd_bus_low_power_state bus_low_power_state;
#ifdef DHD_FLOW_RING_STATUS_TRACE
	dhd_frs_trace_t frs_isr_trace[FRS_TRACE_SIZE];	/* frs - flow_ring_status */
	dhd_frs_trace_t frs_dpc_trace[FRS_TRACE_SIZE];	/* frs - flow_ring_status */
	uint32 frs_isr_count;
	uint32 frs_dpc_count;
#endif /* DHD_FLOW_RING_STATUS_TRACE */
#ifdef DHD_MMIO_TRACE
	dhd_mmio_trace_t mmio_trace[MAX_MMIO_TRACE_SIZE];
	uint32 mmio_trace_count;
#endif /* defined(DHD_MMIO_TRACE) */
	dhd_ds_trace_t ds_trace[MAX_DS_TRACE_SIZE];
	uint32 ds_trace_count;
	uint32 hostready_count;	/* Number of hostready issued */
#if defined(PCIE_OOB) || defined (BCMPCIE_OOB_HOST_WAKE)
	bool oob_presuspend;
#endif /* PCIE_OOB || BCMPCIE_OOB_HOST_WAKE */
	dhdpcie_config_save_t saved_config;	/* PCIe config-space snapshot */
	ulong resume_intr_enable_count;
	ulong dpc_intr_enable_count;
	ulong isr_intr_disable_count;
	ulong suspend_intr_disable_count;
	ulong dpc_return_busdown_count;
	ulong non_ours_irq_count;
#ifdef BCMPCIE_OOB_HOST_WAKE
	ulong oob_intr_count;
	ulong oob_intr_enable_count;
	ulong oob_intr_disable_count;
	uint64 last_oob_irq_isr_time;
	uint64 last_oob_irq_thr_time;
	uint64 last_oob_irq_enable_time;
	uint64 last_oob_irq_disable_time;
#endif /* BCMPCIE_OOB_HOST_WAKE */
	/* timestamps of recent bus events, for debug dumps */
	uint64 isr_entry_time;
	uint64 isr_exit_time;
	uint64 isr_sched_dpc_time;
	uint64 rpm_sched_dpc_time;
	uint64 dpc_entry_time;
	uint64 dpc_exit_time;
	uint64 resched_dpc_time;
	uint64 last_d3_inform_time;
	uint64 last_process_ctrlbuf_time;
	uint64 last_process_flowring_time;
	uint64 last_process_txcpl_time;
	uint64 last_process_rxcpl_time;
	uint64 last_process_infocpl_time;
	uint64 last_process_edl_time;
	uint64 last_suspend_start_time;
	uint64 last_suspend_end_time;
	uint64 last_resume_start_time;
	uint64 last_resume_end_time;
	uint64 last_non_ours_irq_time;
	bool hwa_enabled;
	bool idma_enabled;
	bool ifrm_enabled;
	bool dar_enabled;
	uint32 dmaxfer_complete;
	uint8 dw_option;	/* DEVICE_WAKE_NONE/OOB/INB (see INBAND_DW_ENAB et al.) */
#ifdef PCIE_INB_DW
	bool inb_enabled;
	uint32 ds_exit_timeout;
	uint32 host_sleep_exit_timeout;
	uint wait_for_ds_exit;
	uint32 inband_dw_assert_cnt;	/* # of inband device_wake assert */
	uint32 inband_dw_deassert_cnt;	/* # of inband device_wake deassert */
	uint32 inband_ds_exit_host_cnt;	/* # of DS-EXIT , host initiated */
	uint32 inband_ds_exit_device_cnt;	/* # of DS-EXIT , device initiated */
	uint32 inband_ds_exit_to_cnt;	/* # of DS-EXIT timeout */
	uint32 inband_host_sleep_exit_to_cnt;	/* # of Host_Sleep exit timeout */
	void *inb_lock;	/* Lock to serialize in band device wake activity */
	/* # of contexts in the host which currently want a FW transaction */
	uint32 host_active_cnt;
	bool skip_ds_ack;	/* Skip DS-ACK during suspend in progress */
#endif /* PCIE_INB_DW */
#if defined(PCIE_OOB) || defined(PCIE_INB_DW)
	bool ds_enabled;
#endif
#ifdef DHD_PCIE_RUNTIMEPM
	bool chk_pm;	/* To avoid counting of wake up from Runtime PM */
#endif /* DHD_PCIE_RUNTIMEPM */
#if defined(PCIE_INB_DW)
	bool calc_ds_exit_latency;
	bool deep_sleep;	/* Indicates deep_sleep set or unset by the DHD IOVAR deep_sleep */
	uint64 ds_exit_latency;
	uint64 ds_exit_ts1;
	uint64 ds_exit_ts2;
#endif /* PCIE_INB_DW */
	bool _dar_war;	/* DAR WAR for revs < 64 (see DAR_PWRREQ) */
#ifdef GDB_PROXY
	/* True if firmware loaded and backplane accessible */
	bool gdb_proxy_access_enabled;
	/* ID set by last "gdb_proxy_probe" iovar */
	uint32 gdb_proxy_last_id;
	/* True if firmware was started in bootloader mode */
	bool gdb_proxy_bootloader_mode;
#endif /* GDB_PROXY */
	uint8 dma_chan;

	bool cto_enable;	/* enable PCIE CTO Prevention and recovery */
	uint32 cto_threshold;	/* PCIE CTO timeout threshold */
	bool cto_triggered;	/* CTO is triggered */
	bool intr_enabled;	/* ready to receive interrupts from dongle */
	int pwr_req_ref;	/* presumably a power-request reference count -- confirm */
	bool flr_force_fail;	/* user intends to simulate flr force fail */

	/* Information used to compose the memory map and to write the memory map,
	 * FW, and FW signature to dongle RAM.
	 * This information is used by the bootloader.
	 */
	uint32 ramtop_addr;	/* Dongle address of unused space at top of RAM */
	uint32 fw_download_addr;	/* Dongle address of FW download */
	uint32 fw_download_len;	/* Length in bytes of FW download */
	uint32 fwsig_download_addr;	/* Dongle address of FW signature download */
	uint32 fwsig_download_len;	/* Length in bytes of FW signature download */
	uint32 fwstat_download_addr;	/* Dongle address of FWS status download */
	uint32 fwstat_download_len;	/* Length in bytes of FWS status download */
	uint32 fw_memmap_download_addr;	/* Dongle address of FWS memory-info download */
	uint32 fw_memmap_download_len;	/* Length in bytes of FWS memory-info download */

	char fwsig_filename[DHD_FILENAME_MAX];	/* Name of FW signature file */
	char bootloader_filename[DHD_FILENAME_MAX];	/* Name of bootloader image file */
	uint32 bootloader_addr;	/* Dongle address of bootloader download */
	bool force_bt_quiesce;	/* send bt_quiesce command to BT driver. */
	bool rc_ep_aspm_cap;	/* RC and EP ASPM capable */
	bool rc_ep_l1ss_cap;	/* EC and EP L1SS capable */
#if defined(DHD_H2D_LOG_TIME_SYNC)
	ulong dhd_rte_time_sync_count;	/* OSL_SYSUPTIME_US() */
#endif /* DHD_H2D_LOG_TIME_SYNC */
#ifdef D2H_MINIDUMP
	bool d2h_minidump;	/* This flag will be set if Host and FW handshake to collect minidump */
	bool d2h_minidump_override;	/* Force disable minidump through dhd IOVAR */
#endif /* D2H_MINIDUMP */
#ifdef BCMSLTGT
	int xtalfreq;	/* Xtal frequency used for htclkratio calculation */
	uint32 ilp_tick;	/* ILP ticks per second read from pmutimer */
	uint32 xtal_ratio;	/* xtal ticks per 4 ILP ticks read from pmu_xtalfreq */
#endif /* BCMSLTGT */
#ifdef BT_OVER_PCIE
	/* whether the chip is in BT over PCIE mode or not */
	bool btop_mode;
#endif /* BT_OVER_PCIE */
	uint16 hp2p_txcpl_max_items;
	uint16 hp2p_rxcpl_max_items;
	/* PCIE coherent status */
	uint32 coherent_state;
	uint32 inb_dw_deassert_cnt;
	uint64 arm_oor_time;
	uint64 rd_shared_pass_time;
	uint32 hwa_mem_base;
	uint32 hwa_mem_size;
} dhd_bus_t;
595*4882a593Smuzhiyun
596*4882a593Smuzhiyun #ifdef DHD_MSI_SUPPORT
597*4882a593Smuzhiyun extern uint enable_msi;
598*4882a593Smuzhiyun #endif /* DHD_MSI_SUPPORT */
599*4882a593Smuzhiyun
/* Interrupt delivery mechanism in use for the PCIe endpoint */
enum {
	PCIE_INTX = 0,	/* legacy line-based (INTx) interrupts */
	PCIE_MSI = 1	/* Message Signaled Interrupts */
};
604*4882a593Smuzhiyun
/* How device-to-host interrupts are masked/unmasked
 * (names suggest: via the dongle's D2H interrupt mask register vs. the
 * host-side IRQ enable/disable path -- confirm against users of these values)
 */
enum {
	PCIE_D2H_INTMASK_CTRL = 0,	/* control through D2H interrupt mask */
	PCIE_HOST_IRQ_CTRL = 1		/* control through host IRQ enable/disable */
};
609*4882a593Smuzhiyun
610*4882a593Smuzhiyun static INLINE bool
__dhd_check_bus_in_lps(dhd_bus_t * bus)611*4882a593Smuzhiyun __dhd_check_bus_in_lps(dhd_bus_t *bus)
612*4882a593Smuzhiyun {
613*4882a593Smuzhiyun bool ret = (bus->bus_low_power_state == DHD_BUS_D3_INFORM_SENT) ||
614*4882a593Smuzhiyun (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED);
615*4882a593Smuzhiyun return ret;
616*4882a593Smuzhiyun }
617*4882a593Smuzhiyun
618*4882a593Smuzhiyun static INLINE bool
dhd_check_bus_in_lps(dhd_bus_t * bus)619*4882a593Smuzhiyun dhd_check_bus_in_lps(dhd_bus_t *bus)
620*4882a593Smuzhiyun {
621*4882a593Smuzhiyun unsigned long flags_bus;
622*4882a593Smuzhiyun bool ret;
623*4882a593Smuzhiyun DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus);
624*4882a593Smuzhiyun ret = __dhd_check_bus_in_lps(bus);
625*4882a593Smuzhiyun DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus);
626*4882a593Smuzhiyun return ret;
627*4882a593Smuzhiyun }
628*4882a593Smuzhiyun
629*4882a593Smuzhiyun static INLINE bool
__dhd_check_bus_lps_d3_acked(dhd_bus_t * bus)630*4882a593Smuzhiyun __dhd_check_bus_lps_d3_acked(dhd_bus_t *bus)
631*4882a593Smuzhiyun {
632*4882a593Smuzhiyun bool ret = (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED);
633*4882a593Smuzhiyun return ret;
634*4882a593Smuzhiyun }
635*4882a593Smuzhiyun
636*4882a593Smuzhiyun static INLINE bool
dhd_check_bus_lps_d3_acked(dhd_bus_t * bus)637*4882a593Smuzhiyun dhd_check_bus_lps_d3_acked(dhd_bus_t *bus)
638*4882a593Smuzhiyun {
639*4882a593Smuzhiyun unsigned long flags_bus;
640*4882a593Smuzhiyun bool ret;
641*4882a593Smuzhiyun DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus);
642*4882a593Smuzhiyun ret = __dhd_check_bus_lps_d3_acked(bus);
643*4882a593Smuzhiyun DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus);
644*4882a593Smuzhiyun return ret;
645*4882a593Smuzhiyun }
646*4882a593Smuzhiyun
647*4882a593Smuzhiyun static INLINE void
__dhd_set_bus_not_in_lps(dhd_bus_t * bus)648*4882a593Smuzhiyun __dhd_set_bus_not_in_lps(dhd_bus_t *bus)
649*4882a593Smuzhiyun {
650*4882a593Smuzhiyun bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
651*4882a593Smuzhiyun return;
652*4882a593Smuzhiyun }
653*4882a593Smuzhiyun
654*4882a593Smuzhiyun static INLINE void
dhd_set_bus_not_in_lps(dhd_bus_t * bus)655*4882a593Smuzhiyun dhd_set_bus_not_in_lps(dhd_bus_t *bus)
656*4882a593Smuzhiyun {
657*4882a593Smuzhiyun unsigned long flags_bus;
658*4882a593Smuzhiyun DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus);
659*4882a593Smuzhiyun __dhd_set_bus_not_in_lps(bus);
660*4882a593Smuzhiyun DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus);
661*4882a593Smuzhiyun return;
662*4882a593Smuzhiyun }
663*4882a593Smuzhiyun
664*4882a593Smuzhiyun static INLINE void
__dhd_set_bus_lps_d3_informed(dhd_bus_t * bus)665*4882a593Smuzhiyun __dhd_set_bus_lps_d3_informed(dhd_bus_t *bus)
666*4882a593Smuzhiyun {
667*4882a593Smuzhiyun bus->bus_low_power_state = DHD_BUS_D3_INFORM_SENT;
668*4882a593Smuzhiyun return;
669*4882a593Smuzhiyun }
670*4882a593Smuzhiyun
671*4882a593Smuzhiyun static INLINE void
dhd_set_bus_lps_d3_informed(dhd_bus_t * bus)672*4882a593Smuzhiyun dhd_set_bus_lps_d3_informed(dhd_bus_t *bus)
673*4882a593Smuzhiyun {
674*4882a593Smuzhiyun unsigned long flags_bus;
675*4882a593Smuzhiyun DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus);
676*4882a593Smuzhiyun __dhd_set_bus_lps_d3_informed(bus);
677*4882a593Smuzhiyun DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus);
678*4882a593Smuzhiyun return;
679*4882a593Smuzhiyun }
680*4882a593Smuzhiyun
681*4882a593Smuzhiyun static INLINE void
__dhd_set_bus_lps_d3_acked(dhd_bus_t * bus)682*4882a593Smuzhiyun __dhd_set_bus_lps_d3_acked(dhd_bus_t *bus)
683*4882a593Smuzhiyun {
684*4882a593Smuzhiyun bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED;
685*4882a593Smuzhiyun return;
686*4882a593Smuzhiyun }
687*4882a593Smuzhiyun
688*4882a593Smuzhiyun static INLINE void
dhd_set_bus_lps_d3_acked(dhd_bus_t * bus)689*4882a593Smuzhiyun dhd_set_bus_lps_d3_acked(dhd_bus_t *bus)
690*4882a593Smuzhiyun {
691*4882a593Smuzhiyun unsigned long flags_bus;
692*4882a593Smuzhiyun DHD_BUS_LP_STATE_LOCK(bus->bus_lp_state_lock, flags_bus);
693*4882a593Smuzhiyun __dhd_set_bus_lps_d3_acked(bus);
694*4882a593Smuzhiyun DHD_BUS_LP_STATE_UNLOCK(bus->bus_lp_state_lock, flags_bus);
695*4882a593Smuzhiyun return;
696*4882a593Smuzhiyun }
697*4882a593Smuzhiyun
/* check routines
 * Convenience wrappers over the LP-state helpers above. The __ variants
 * expand to the lock-free helpers and require the caller to already hold
 * the bus LP state lock.
 */
#define DHD_CHK_BUS_IN_LPS(bus)			dhd_check_bus_in_lps(bus)
#define __DHD_CHK_BUS_IN_LPS(bus)		__dhd_check_bus_in_lps(bus)

/* Fully parenthesized so the negated expansion cannot combine unexpectedly
 * with operators at the macro use site (CERT PRE02-C).
 */
#define DHD_CHK_BUS_NOT_IN_LPS(bus)		(!(DHD_CHK_BUS_IN_LPS(bus)))
#define __DHD_CHK_BUS_NOT_IN_LPS(bus)		(!(__DHD_CHK_BUS_IN_LPS(bus)))

/* "D3 informed" is implied by being in LPS at all: the state is either
 * D3_INFORM_SENT or D3_ACK_RECIEVED once the inform has gone out.
 */
#define DHD_CHK_BUS_LPS_D3_INFORMED(bus)	DHD_CHK_BUS_IN_LPS(bus)
#define __DHD_CHK_BUS_LPS_D3_INFORMED(bus)	__DHD_CHK_BUS_IN_LPS(bus)

#define DHD_CHK_BUS_LPS_D3_ACKED(bus)		dhd_check_bus_lps_d3_acked(bus)
#define __DHD_CHK_BUS_LPS_D3_ACKED(bus)		__dhd_check_bus_lps_d3_acked(bus)

/* set routines */
#define DHD_SET_BUS_NOT_IN_LPS(bus)		dhd_set_bus_not_in_lps(bus)
#define __DHD_SET_BUS_NOT_IN_LPS(bus)		__dhd_set_bus_not_in_lps(bus)

#define DHD_SET_BUS_LPS_D3_INFORMED(bus)	dhd_set_bus_lps_d3_informed(bus)
#define __DHD_SET_BUS_LPS_D3_INFORMED(bus)	__dhd_set_bus_lps_d3_informed(bus)

#define DHD_SET_BUS_LPS_D3_ACKED(bus)		dhd_set_bus_lps_d3_acked(bus)
#define __DHD_SET_BUS_LPS_D3_ACKED(bus)		__dhd_set_bus_lps_d3_acked(bus)
720*4882a593Smuzhiyun
721*4882a593Smuzhiyun /* function declarations */
722*4882a593Smuzhiyun
723*4882a593Smuzhiyun extern uint32* dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size);
724*4882a593Smuzhiyun extern int dhdpcie_bus_register(void);
725*4882a593Smuzhiyun extern void dhdpcie_bus_unregister(void);
726*4882a593Smuzhiyun extern bool dhdpcie_chipmatch(uint16 vendor, uint16 device);
727*4882a593Smuzhiyun
728*4882a593Smuzhiyun extern int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr,
729*4882a593Smuzhiyun volatile char *regs, volatile char *tcm, void *pci_dev, wifi_adapter_info_t *adapter);
730*4882a593Smuzhiyun extern uint32 dhdpcie_bus_cfg_read_dword(struct dhd_bus *bus, uint32 addr, uint32 size);
731*4882a593Smuzhiyun extern void dhdpcie_bus_cfg_write_dword(struct dhd_bus *bus, uint32 addr, uint32 size, uint32 data);
732*4882a593Smuzhiyun extern void dhdpcie_bus_intr_enable(struct dhd_bus *bus);
733*4882a593Smuzhiyun extern void dhdpcie_bus_intr_disable(struct dhd_bus *bus);
734*4882a593Smuzhiyun extern int dhpcie_bus_mask_interrupt(dhd_bus_t *bus);
735*4882a593Smuzhiyun extern void dhdpcie_bus_release(struct dhd_bus *bus);
736*4882a593Smuzhiyun extern int32 dhdpcie_bus_isr(struct dhd_bus *bus);
737*4882a593Smuzhiyun extern void dhdpcie_free_irq(dhd_bus_t *bus);
738*4882a593Smuzhiyun extern void dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value);
739*4882a593Smuzhiyun extern void dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake);
740*4882a593Smuzhiyun extern void dhdpcie_dongle_reset(dhd_bus_t *bus);
741*4882a593Smuzhiyun extern int dhd_bus_cfg_sprom_ctrl_bp_reset(struct dhd_bus *bus);
742*4882a593Smuzhiyun extern int dhd_bus_cfg_ss_ctrl_bp_reset(struct dhd_bus *bus);
743*4882a593Smuzhiyun #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
744*4882a593Smuzhiyun extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint);
745*4882a593Smuzhiyun #else
746*4882a593Smuzhiyun extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state);
747*4882a593Smuzhiyun #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
748*4882a593Smuzhiyun extern int dhdpcie_pci_suspend_resume(struct dhd_bus *bus, bool state);
749*4882a593Smuzhiyun extern uint32 dhdpcie_force_alp(struct dhd_bus *bus, bool enable);
750*4882a593Smuzhiyun extern uint32 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int force_l1_entry_time);
751*4882a593Smuzhiyun extern bool dhdpcie_tcm_valid(dhd_bus_t *bus);
752*4882a593Smuzhiyun extern void dhdpcie_pme_active(osl_t *osh, bool enable);
753*4882a593Smuzhiyun extern bool dhdpcie_pme_cap(osl_t *osh);
754*4882a593Smuzhiyun extern uint32 dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val);
755*4882a593Smuzhiyun extern void dhdpcie_set_pmu_min_res_mask(struct dhd_bus *bus, uint min_res_mask);
756*4882a593Smuzhiyun extern uint8 dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val);
757*4882a593Smuzhiyun extern int dhdpcie_disable_irq(dhd_bus_t *bus);
758*4882a593Smuzhiyun extern int dhdpcie_disable_irq_nosync(dhd_bus_t *bus);
759*4882a593Smuzhiyun extern int dhdpcie_enable_irq(dhd_bus_t *bus);
760*4882a593Smuzhiyun
761*4882a593Smuzhiyun extern void dhd_bus_dump_dar_registers(struct dhd_bus *bus);
762*4882a593Smuzhiyun
763*4882a593Smuzhiyun #if defined(linux) || defined(LINUX)
764*4882a593Smuzhiyun extern uint32 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset);
765*4882a593Smuzhiyun extern uint32 dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
766*4882a593Smuzhiyun bool is_write, uint32 writeval);
767*4882a593Smuzhiyun extern uint32 dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
768*4882a593Smuzhiyun bool is_write, uint32 writeval);
769*4882a593Smuzhiyun extern uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus);
770*4882a593Smuzhiyun #else
/* Non-Linux stubs for the RC/EP config-space accessors declared above.
 * Note the -1 returns convert to UINT32_MAX in the uint32 return type,
 * matching the error convention of the extern prototypes.
 */
static INLINE uint32 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset) { return 0;}
static INLINE uint32 dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
	bool is_write, uint32 writeval) { return -1;}
static INLINE uint32 dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext,
	bool is_write, uint32 writeval) { return -1;}
static INLINE uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus) { return -1;}
777*4882a593Smuzhiyun #endif
778*4882a593Smuzhiyun #if defined(linux) || defined(LINUX)
779*4882a593Smuzhiyun extern int dhdpcie_start_host_dev(dhd_bus_t *bus);
780*4882a593Smuzhiyun extern int dhdpcie_stop_host_dev(dhd_bus_t *bus);
781*4882a593Smuzhiyun extern int dhdpcie_disable_device(dhd_bus_t *bus);
782*4882a593Smuzhiyun extern int dhdpcie_alloc_resource(dhd_bus_t *bus);
783*4882a593Smuzhiyun extern void dhdpcie_free_resource(dhd_bus_t *bus);
784*4882a593Smuzhiyun extern void dhdpcie_dump_resource(dhd_bus_t *bus);
785*4882a593Smuzhiyun extern int dhdpcie_bus_request_irq(struct dhd_bus *bus);
786*4882a593Smuzhiyun void dhdpcie_os_setbar1win(dhd_bus_t *bus, uint32 addr);
787*4882a593Smuzhiyun void dhdpcie_os_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
788*4882a593Smuzhiyun uint8 dhdpcie_os_rtcm8(dhd_bus_t *bus, ulong offset);
789*4882a593Smuzhiyun void dhdpcie_os_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
790*4882a593Smuzhiyun uint16 dhdpcie_os_rtcm16(dhd_bus_t *bus, ulong offset);
791*4882a593Smuzhiyun void dhdpcie_os_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
792*4882a593Smuzhiyun uint32 dhdpcie_os_rtcm32(dhd_bus_t *bus, ulong offset);
793*4882a593Smuzhiyun #ifdef DHD_SUPPORT_64BIT
794*4882a593Smuzhiyun void dhdpcie_os_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
795*4882a593Smuzhiyun uint64 dhdpcie_os_rtcm64(dhd_bus_t *bus, ulong offset);
796*4882a593Smuzhiyun #endif
797*4882a593Smuzhiyun #endif /* LINUX || linux */
798*4882a593Smuzhiyun
799*4882a593Smuzhiyun #if defined(linux) || defined(LINUX) || defined(DHD_EFI)
800*4882a593Smuzhiyun extern int dhdpcie_enable_device(dhd_bus_t *bus);
801*4882a593Smuzhiyun #endif
802*4882a593Smuzhiyun
803*4882a593Smuzhiyun #ifdef BCMPCIE_OOB_HOST_WAKE
804*4882a593Smuzhiyun extern int dhdpcie_oob_intr_register(dhd_bus_t *bus);
805*4882a593Smuzhiyun extern void dhdpcie_oob_intr_unregister(dhd_bus_t *bus);
806*4882a593Smuzhiyun extern void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable);
807*4882a593Smuzhiyun extern int dhdpcie_get_oob_irq_num(struct dhd_bus *bus);
808*4882a593Smuzhiyun extern int dhdpcie_get_oob_irq_status(struct dhd_bus *bus);
809*4882a593Smuzhiyun extern int dhdpcie_get_oob_irq_level(void);
810*4882a593Smuzhiyun #endif /* BCMPCIE_OOB_HOST_WAKE */
811*4882a593Smuzhiyun #ifdef PCIE_OOB
812*4882a593Smuzhiyun extern void dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val);
813*4882a593Smuzhiyun extern int dhd_oob_get_bt_reg_on(struct dhd_bus *bus);
814*4882a593Smuzhiyun extern void dhdpcie_oob_init(dhd_bus_t *bus);
815*4882a593Smuzhiyun extern int dhd_os_oob_set_device_wake(struct dhd_bus *bus, bool val);
816*4882a593Smuzhiyun extern void dhd_os_ib_set_device_wake(struct dhd_bus *bus, bool val);
817*4882a593Smuzhiyun #endif /* PCIE_OOB */
818*4882a593Smuzhiyun #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
819*4882a593Smuzhiyun extern void dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus);
820*4882a593Smuzhiyun #endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
821*4882a593Smuzhiyun
822*4882a593Smuzhiyun #if defined(linux) || defined(LINUX)
823*4882a593Smuzhiyun /* XXX: SWWLAN-82173 Making PCIe RC D3cold by force during system PM
824*4882a593Smuzhiyun * exynos_pcie_pm_suspend : RC goes to suspend status & assert PERST
825*4882a593Smuzhiyun * exynos_pcie_pm_resume : de-assert PERST & RC goes to resume status
826*4882a593Smuzhiyun */
827*4882a593Smuzhiyun #if defined(CONFIG_ARCH_EXYNOS)
828*4882a593Smuzhiyun #define EXYNOS_PCIE_VENDOR_ID 0x144d
829*4882a593Smuzhiyun #if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS7420)
830*4882a593Smuzhiyun #define EXYNOS_PCIE_DEVICE_ID 0xa575
831*4882a593Smuzhiyun #define EXYNOS_PCIE_CH_NUM 1
832*4882a593Smuzhiyun #elif defined(CONFIG_SOC_EXYNOS8890)
833*4882a593Smuzhiyun #define EXYNOS_PCIE_DEVICE_ID 0xa544
834*4882a593Smuzhiyun #define EXYNOS_PCIE_CH_NUM 0
835*4882a593Smuzhiyun #elif defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
836*4882a593Smuzhiyun defined(CONFIG_SOC_EXYNOS9820) || defined(CONFIG_SOC_EXYNOS9830) || \
837*4882a593Smuzhiyun defined(CONFIG_SOC_EXYNOS2100) || defined(CONFIG_SOC_EXYNOS1000) || \
838*4882a593Smuzhiyun defined(CONFIG_SOC_GS101)
839*4882a593Smuzhiyun #define EXYNOS_PCIE_DEVICE_ID 0xecec
840*4882a593Smuzhiyun #define EXYNOS_PCIE_CH_NUM 0
841*4882a593Smuzhiyun #else
842*4882a593Smuzhiyun #error "Not supported platform"
843*4882a593Smuzhiyun #endif /* CONFIG_SOC_EXYNOSXXXX & CONFIG_MACH_UNIVERSALXXXX */
844*4882a593Smuzhiyun extern void exynos_pcie_pm_suspend(int ch_num);
845*4882a593Smuzhiyun extern void exynos_pcie_pm_resume(int ch_num);
846*4882a593Smuzhiyun #endif /* CONFIG_ARCH_EXYNOS */
847*4882a593Smuzhiyun
848*4882a593Smuzhiyun #if defined(CONFIG_ARCH_MSM)
849*4882a593Smuzhiyun #define MSM_PCIE_VENDOR_ID 0x17cb
850*4882a593Smuzhiyun #if defined(CONFIG_ARCH_APQ8084)
851*4882a593Smuzhiyun #define MSM_PCIE_DEVICE_ID 0x0101
852*4882a593Smuzhiyun #elif defined(CONFIG_ARCH_MSM8994)
853*4882a593Smuzhiyun #define MSM_PCIE_DEVICE_ID 0x0300
854*4882a593Smuzhiyun #elif defined(CONFIG_ARCH_MSM8996)
855*4882a593Smuzhiyun #define MSM_PCIE_DEVICE_ID 0x0104
856*4882a593Smuzhiyun #elif defined(CONFIG_ARCH_MSM8998)
857*4882a593Smuzhiyun #define MSM_PCIE_DEVICE_ID 0x0105
858*4882a593Smuzhiyun #elif defined(CONFIG_ARCH_SDM845) || defined(CONFIG_ARCH_SM8150) || \
859*4882a593Smuzhiyun defined(CONFIG_ARCH_KONA) || defined(CONFIG_ARCH_LAHAINA)
860*4882a593Smuzhiyun #define MSM_PCIE_DEVICE_ID 0x0106
861*4882a593Smuzhiyun #else
862*4882a593Smuzhiyun #error "Not supported platform"
863*4882a593Smuzhiyun #endif
864*4882a593Smuzhiyun #endif /* CONFIG_ARCH_MSM */
865*4882a593Smuzhiyun
866*4882a593Smuzhiyun #if defined(CONFIG_X86)
867*4882a593Smuzhiyun #define X86_PCIE_VENDOR_ID 0x8086
868*4882a593Smuzhiyun #define X86_PCIE_DEVICE_ID 0x9c1a
869*4882a593Smuzhiyun #endif /* CONFIG_X86 */
870*4882a593Smuzhiyun
871*4882a593Smuzhiyun #if defined(CONFIG_ARCH_TEGRA)
872*4882a593Smuzhiyun #define TEGRA_PCIE_VENDOR_ID 0x14e4
873*4882a593Smuzhiyun #define TEGRA_PCIE_DEVICE_ID 0x4347
874*4882a593Smuzhiyun #endif /* CONFIG_ARCH_TEGRA */
875*4882a593Smuzhiyun
876*4882a593Smuzhiyun #if defined(BOARD_HIKEY)
877*4882a593Smuzhiyun #define HIKEY_PCIE_VENDOR_ID 0x19e5
878*4882a593Smuzhiyun #define HIKEY_PCIE_DEVICE_ID 0x3660
879*4882a593Smuzhiyun #endif /* BOARD_HIKEY */
880*4882a593Smuzhiyun
881*4882a593Smuzhiyun #define DUMMY_PCIE_VENDOR_ID 0xffff
882*4882a593Smuzhiyun #define DUMMY_PCIE_DEVICE_ID 0xffff
883*4882a593Smuzhiyun
884*4882a593Smuzhiyun #if defined(CONFIG_ARCH_EXYNOS)
885*4882a593Smuzhiyun #define PCIE_RC_VENDOR_ID EXYNOS_PCIE_VENDOR_ID
886*4882a593Smuzhiyun #define PCIE_RC_DEVICE_ID EXYNOS_PCIE_DEVICE_ID
887*4882a593Smuzhiyun #elif defined(CONFIG_ARCH_MSM)
888*4882a593Smuzhiyun #define PCIE_RC_VENDOR_ID MSM_PCIE_VENDOR_ID
889*4882a593Smuzhiyun #define PCIE_RC_DEVICE_ID MSM_PCIE_DEVICE_ID
890*4882a593Smuzhiyun #elif defined(CONFIG_X86)
891*4882a593Smuzhiyun #define PCIE_RC_VENDOR_ID X86_PCIE_VENDOR_ID
892*4882a593Smuzhiyun #define PCIE_RC_DEVICE_ID X86_PCIE_DEVICE_ID
893*4882a593Smuzhiyun #elif defined(CONFIG_ARCH_TEGRA)
894*4882a593Smuzhiyun #define PCIE_RC_VENDOR_ID TEGRA_PCIE_VENDOR_ID
895*4882a593Smuzhiyun #define PCIE_RC_DEVICE_ID TEGRA_PCIE_DEVICE_ID
896*4882a593Smuzhiyun #elif defined(BOARD_HIKEY)
897*4882a593Smuzhiyun #define PCIE_RC_VENDOR_ID HIKEY_PCIE_VENDOR_ID
898*4882a593Smuzhiyun #define PCIE_RC_DEVICE_ID HIKEY_PCIE_DEVICE_ID
899*4882a593Smuzhiyun #else
900*4882a593Smuzhiyun /* Use dummy vendor and device IDs */
901*4882a593Smuzhiyun #define PCIE_RC_VENDOR_ID DUMMY_PCIE_VENDOR_ID
902*4882a593Smuzhiyun #define PCIE_RC_DEVICE_ID DUMMY_PCIE_DEVICE_ID
903*4882a593Smuzhiyun #endif /* CONFIG_ARCH_EXYNOS */
904*4882a593Smuzhiyun #endif /* linux || LINUX */
905*4882a593Smuzhiyun
906*4882a593Smuzhiyun #define DHD_REGULAR_RING 0
907*4882a593Smuzhiyun #define DHD_HP2P_RING 1
908*4882a593Smuzhiyun
909*4882a593Smuzhiyun #ifdef CONFIG_ARCH_TEGRA
910*4882a593Smuzhiyun extern int tegra_pcie_pm_suspend(void);
911*4882a593Smuzhiyun extern int tegra_pcie_pm_resume(void);
912*4882a593Smuzhiyun #endif /* CONFIG_ARCH_TEGRA */
913*4882a593Smuzhiyun
914*4882a593Smuzhiyun extern int dhd_buzzz_dump_dngl(dhd_bus_t *bus);
915*4882a593Smuzhiyun #ifdef IDLE_TX_FLOW_MGMT
916*4882a593Smuzhiyun extern int dhd_bus_flow_ring_resume_request(struct dhd_bus *bus, void *arg);
917*4882a593Smuzhiyun extern void dhd_bus_flow_ring_resume_response(struct dhd_bus *bus, uint16 flowid, int32 status);
918*4882a593Smuzhiyun extern int dhd_bus_flow_ring_suspend_request(struct dhd_bus *bus, void *arg);
919*4882a593Smuzhiyun extern void dhd_bus_flow_ring_suspend_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
920*4882a593Smuzhiyun extern void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus,
921*4882a593Smuzhiyun flow_ring_node_t *flow_ring_node);
922*4882a593Smuzhiyun extern void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus,
923*4882a593Smuzhiyun flow_ring_node_t *flow_ring_node);
924*4882a593Smuzhiyun extern void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus,
925*4882a593Smuzhiyun flow_ring_node_t *flow_ring_node);
926*4882a593Smuzhiyun extern void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus,
927*4882a593Smuzhiyun flow_ring_node_t *flow_ring_node);
928*4882a593Smuzhiyun #endif /* IDLE_TX_FLOW_MGMT */
929*4882a593Smuzhiyun
930*4882a593Smuzhiyun extern int dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data);
931*4882a593Smuzhiyun
932*4882a593Smuzhiyun #ifdef DHD_WAKE_STATUS
933*4882a593Smuzhiyun int bcmpcie_get_total_wake(struct dhd_bus *bus);
934*4882a593Smuzhiyun int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag);
935*4882a593Smuzhiyun #endif /* DHD_WAKE_STATUS */
936*4882a593Smuzhiyun #ifdef DHD_MMIO_TRACE
937*4882a593Smuzhiyun extern void dhd_dump_bus_mmio_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf);
938*4882a593Smuzhiyun #endif /* defined(DHD_MMIO_TRACE) */
939*4882a593Smuzhiyun extern void dhd_dump_bus_ds_trace(dhd_bus_t *bus, struct bcmstrbuf *strbuf);
940*4882a593Smuzhiyun extern bool dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus);
941*4882a593Smuzhiyun extern void dhd_bus_hostready(struct dhd_bus *bus);
942*4882a593Smuzhiyun #ifdef PCIE_OOB
943*4882a593Smuzhiyun extern bool dhdpcie_bus_get_pcie_oob_dw_supported(dhd_bus_t *bus);
944*4882a593Smuzhiyun #endif /* PCIE_OOB */
945*4882a593Smuzhiyun #ifdef PCIE_INB_DW
946*4882a593Smuzhiyun extern bool dhdpcie_bus_get_pcie_inband_dw_supported(dhd_bus_t *bus);
947*4882a593Smuzhiyun extern void dhdpcie_bus_set_pcie_inband_dw_state(dhd_bus_t *bus,
948*4882a593Smuzhiyun enum dhd_bus_ds_state state);
949*4882a593Smuzhiyun extern enum dhd_bus_ds_state dhdpcie_bus_get_pcie_inband_dw_state(dhd_bus_t *bus);
950*4882a593Smuzhiyun extern const char * dhd_convert_inb_state_names(enum dhd_bus_ds_state inbstate);
951*4882a593Smuzhiyun extern const char * dhd_convert_dsval(uint32 val, bool d2h);
952*4882a593Smuzhiyun extern int dhd_bus_inb_set_device_wake(struct dhd_bus *bus, bool val);
953*4882a593Smuzhiyun extern void dhd_bus_inb_ack_pending_ds_req(dhd_bus_t *bus);
954*4882a593Smuzhiyun #endif /* PCIE_INB_DW */
955*4882a593Smuzhiyun extern void dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option);
956*4882a593Smuzhiyun #if defined(LINUX) || defined(linux)
957*4882a593Smuzhiyun extern int dhdpcie_irq_disabled(struct dhd_bus *bus);
958*4882a593Smuzhiyun extern int dhdpcie_set_master_and_d0_pwrstate(struct dhd_bus *bus);
959*4882a593Smuzhiyun #else
dhdpcie_irq_disabled(struct dhd_bus * bus)960*4882a593Smuzhiyun static INLINE bool dhdpcie_irq_disabled(struct dhd_bus *bus) { return BCME_ERROR;}
dhdpcie_set_master_and_d0_pwrstate(struct dhd_bus * bus)961*4882a593Smuzhiyun static INLINE int dhdpcie_set_master_and_d0_pwrstate(struct dhd_bus *bus)
962*4882a593Smuzhiyun { return BCME_ERROR;}
963*4882a593Smuzhiyun #endif /* defined(LINUX) || defined(linux) */
964*4882a593Smuzhiyun
965*4882a593Smuzhiyun #ifdef DHD_EFI
966*4882a593Smuzhiyun extern bool dhdpcie_is_arm_halted(struct dhd_bus *bus);
967*4882a593Smuzhiyun extern int dhd_os_wifi_platform_set_power(uint32 value);
968*4882a593Smuzhiyun extern void dhdpcie_dongle_pwr_toggle(dhd_bus_t *bus);
969*4882a593Smuzhiyun void dhdpcie_dongle_flr_or_pwr_toggle(dhd_bus_t *bus);
970*4882a593Smuzhiyun int dhd_control_signal(dhd_bus_t *bus, char *arg, int len, int set);
971*4882a593Smuzhiyun extern int dhd_wifi_properties(struct dhd_bus *bus, char *arg, int len);
972*4882a593Smuzhiyun extern int dhd_otp_dump(dhd_bus_t *bus, char *arg, int len);
973*4882a593Smuzhiyun extern int dhdpcie_deinit_phase1(dhd_bus_t *bus);
974*4882a593Smuzhiyun int dhdpcie_disable_intr_poll(dhd_bus_t *bus);
975*4882a593Smuzhiyun int dhdpcie_enable_intr_poll(dhd_bus_t *bus);
976*4882a593Smuzhiyun #ifdef BT_OVER_PCIE
977*4882a593Smuzhiyun int dhd_btop_test(dhd_bus_t *bus, char *arg, int len);
978*4882a593Smuzhiyun #endif /* BT_OVER_PCIE */
979*4882a593Smuzhiyun #else
/* Non-EFI stubs: ARM is treated as always halted, platform power-set is a
 * no-op success, and FLR/power-toggle does nothing.
 */
static INLINE bool dhdpcie_is_arm_halted(struct dhd_bus *bus) {return TRUE;}
static INLINE int dhd_os_wifi_platform_set_power(uint32 value) {return BCME_OK; }
static INLINE void
dhdpcie_dongle_flr_or_pwr_toggle(dhd_bus_t *bus)
{ return; }
985*4882a593Smuzhiyun #endif /* DHD_EFI */
986*4882a593Smuzhiyun
987*4882a593Smuzhiyun int dhdpcie_config_check(dhd_bus_t *bus);
988*4882a593Smuzhiyun int dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr);
989*4882a593Smuzhiyun int dhdpcie_config_save(dhd_bus_t *bus);
990*4882a593Smuzhiyun int dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state);
991*4882a593Smuzhiyun
992*4882a593Smuzhiyun extern bool dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus);
993*4882a593Smuzhiyun extern bool dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus);
994*4882a593Smuzhiyun extern bool dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus);
995*4882a593Smuzhiyun extern bool dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus);
996*4882a593Smuzhiyun extern bool dhdpcie_bus_get_hp2p_supported(dhd_bus_t *bus);
997*4882a593Smuzhiyun
/* Read 'size' bytes from PCIe config space at 'offset'.
 * For buscorerev <= 64 (4375 and earlier chips) a 100us delay is inserted
 * before the access -- presumably a hardware workaround; confirm against the
 * chip errata before relying on the exact threshold.
 */
static INLINE uint32
dhd_pcie_config_read(dhd_bus_t *bus, uint offset, uint size)
{
	/* For 4375 or prior chips to 4375 */
	if (bus->sih && bus->sih->buscorerev <= 64) {
		OSL_DELAY(100);
	}
	return OSL_PCI_READ_CONFIG(bus->osh, offset, size);
}
1007*4882a593Smuzhiyun
/* Indirect PCIe core register read: write the register index 'val' to
 * configaddr, then read it back through configdata.
 * Same pre-access 100us delay workaround as dhd_pcie_config_read() for
 * buscorerev <= 64 parts.
 */
static INLINE uint32
dhd_pcie_corereg_read(si_t *sih, uint val)
{
	/* For 4375 or prior chips to 4375 */
	if (sih->buscorerev <= 64) {
		OSL_DELAY(100);
	}
	si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0, val);
	return si_corereg(sih, sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), 0, 0);
}
1018*4882a593Smuzhiyun
1019*4882a593Smuzhiyun extern int dhdpcie_get_fwpath_otp(dhd_bus_t *bus, char *fw_path, char *nv_path,
1020*4882a593Smuzhiyun char *clm_path, char *txcap_path);
1021*4882a593Smuzhiyun
1022*4882a593Smuzhiyun extern int dhd_pcie_debug_info_dump(dhd_pub_t *dhd);
1023*4882a593Smuzhiyun extern void dhd_pcie_intr_count_dump(dhd_pub_t *dhd);
1024*4882a593Smuzhiyun extern void dhdpcie_bus_clear_intstatus(dhd_bus_t *bus);
1025*4882a593Smuzhiyun #ifdef DHD_HP2P
1026*4882a593Smuzhiyun extern uint16 dhd_bus_get_hp2p_ring_max_size(dhd_bus_t *bus, bool tx);
1027*4882a593Smuzhiyun #endif
1028*4882a593Smuzhiyun
1029*4882a593Smuzhiyun #if defined(DHD_EFI)
1030*4882a593Smuzhiyun extern wifi_properties_t *dhd_get_props(dhd_bus_t *bus);
1031*4882a593Smuzhiyun #endif
1032*4882a593Smuzhiyun
1033*4882a593Smuzhiyun #if defined(DHD_EFI) || defined(NDIS)
1034*4882a593Smuzhiyun extern int dhd_get_platform(dhd_pub_t* dhd, char *progname);
1035*4882a593Smuzhiyun extern bool dhdpcie_is_chip_supported(uint32 chipid, int *idx);
1036*4882a593Smuzhiyun extern bool dhdpcie_is_sflash_chip(uint32 chipid);
1037*4882a593Smuzhiyun #endif
1038*4882a593Smuzhiyun
1039*4882a593Smuzhiyun extern int dhd_get_pcie_linkspeed(dhd_pub_t *dhd);
1040*4882a593Smuzhiyun extern void dhdpcie_bar1_window_switch_enab(dhd_bus_t *bus);
1041*4882a593Smuzhiyun
1042*4882a593Smuzhiyun #ifdef PCIE_INB_DW
1043*4882a593Smuzhiyun extern void dhdpcie_set_dongle_deepsleep(dhd_bus_t *bus, bool val);
1044*4882a593Smuzhiyun extern void dhd_init_dongle_ds_lock(dhd_bus_t *bus);
1045*4882a593Smuzhiyun extern void dhd_deinit_dongle_ds_lock(dhd_bus_t *bus);
1046*4882a593Smuzhiyun #endif /* PCIE_INB_DW */
1047*4882a593Smuzhiyun
1048*4882a593Smuzhiyun #endif /* dhd_pcie_h */
1049