xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/rockchip_wlan/infineon/bcmdhd/dhd_pcie.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * DHD Bus Module for PCIE
3  *
4  * Portions of this code are copyright (c) 2022 Cypress Semiconductor Corporation
5  *
6  * Copyright (C) 1999-2017, Broadcom Corporation
7  *
8  *      Unless you and Broadcom execute a separate written software license
9  * agreement governing use of this software, this software is licensed to you
10  * under the terms of the GNU General Public License version 2 (the "GPL"),
11  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12  * following added to such license:
13  *
14  *      As a special exception, the copyright holders of this software give you
15  * permission to link this software with independent modules, and to copy and
16  * distribute the resulting executable under terms of your choice, provided that
17  * you also meet, for each linked independent module, the terms and conditions of
18  * the license of that module.  An independent module is a module which is not
19  * derived from this software.  The special exception does not apply to any
20  * modifications of the software.
21  *
22  *      Notwithstanding the above, under no circumstances may you combine this
23  * software in any way with any other Broadcom software provided under a license
24  * other than the GPL, without Broadcom's express prior written consent.
25  *
26  *
27  * <<Broadcom-WL-IPTag/Open:>>
28  *
29  * $Id: dhd_pcie.c 702835 2017-06-05 07:19:55Z $
30  */
31 
32 /* include files */
33 #include <typedefs.h>
34 #include <bcmutils.h>
35 #include <bcmdevs.h>
36 #include <siutils.h>
37 #include <sbpcmcia.h>
38 #include <hndoobr.h>
39 #include <hndsoc.h>
40 #include <hndpmu.h>
41 #include <etd.h>
42 #include <hnd_debug.h>
43 #include <sbchipc.h>
44 #include <sbhndarm.h>
45 #include <hnd_armtrap.h>
46 #if defined(DHD_DEBUG)
47 #include <hnd_cons.h>
48 #endif /* defined(DHD_DEBUG) */
49 #include <dngl_stats.h>
50 #include <pcie_core.h>
51 #include <dhd.h>
52 #include <dhd_bus.h>
53 #include <dhd_flowring.h>
54 #include <dhd_proto.h>
55 #include <dhd_dbg.h>
56 #include <dhd_debug.h>
57 #include <dhd_daemon.h>
58 #include <dhdioctl.h>
59 #include <sdiovar.h>
60 #include <bcmmsgbuf.h>
61 #include <pcicfg.h>
62 #include <dhd_pcie.h>
63 #include <bcmpcie.h>
64 #include <bcmendian.h>
65 #include <bcmstdlib_s.h>
66 #ifdef DHDTCPACK_SUPPRESS
67 #include <dhd_ip.h>
68 #endif /* DHDTCPACK_SUPPRESS */
69 #include <bcmevent.h>
70 #include <trxhdr.h>
71 
72 extern uint32 hw_module_variant;
73 #include <pcie_core.h>
74 
75 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
76 #include <linux/pm_runtime.h>
77 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
78 
79 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
80 #include <debugger.h>
81 #endif /* DEBUGGER || DHD_DSCOPE */
82 
83 #ifdef DNGL_AXI_ERROR_LOGGING
84 #include <dhd_linux_wq.h>
85 #include <dhd_linux.h>
86 #endif /* DNGL_AXI_ERROR_LOGGING */
87 
88 #if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
89 #include <dhd_linux_priv.h>
90 #endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
91 
92 #include <otpdefs.h>
93 #define EXTENDED_PCIE_DEBUG_DUMP 1	/* Enable extended PCIe register dump */
94 
95 #define MEMBLOCK	2048		/* Block size used for downloading of dongle image */
96 #define MAX_WKLK_IDLE_CHECK	3	/* times wake_lock checked before deciding not to suspend */
97 
98 #define	DHD_MAX_ITEMS_HPP_TXCPL_RING	512
99 #define	DHD_MAX_ITEMS_HPP_RXCPL_RING	512
100 
101 #define ARMCR4REG_CORECAP	(0x4/sizeof(uint32))
102 #define ARMCR4REG_MPUCTRL	(0x90/sizeof(uint32))
103 #define ACC_MPU_SHIFT		25
104 #define ACC_MPU_MASK		(0x1u << ACC_MPU_SHIFT)
105 
106 #define ARMCR4REG_BANKIDX	(0x40/sizeof(uint32))
107 #define ARMCR4REG_BANKPDA	(0x4C/sizeof(uint32))
108 /* Temporary WAR to fix precommit until the sync issue between trunk & precommit branches is resolved */
109 
110 /* CTO Prevention Recovery */
111 #ifdef BCMQT_HW
112 #define CTO_TO_CLEAR_WAIT_MS 10000
113 #define CTO_TO_CLEAR_WAIT_MAX_CNT 100
114 #else
115 #define CTO_TO_CLEAR_WAIT_MS 1000
116 #define CTO_TO_CLEAR_WAIT_MAX_CNT 10
117 #endif // endif
118 
119 /* Fetch address of a member in the pciedev_shared structure in dongle memory */
120 #define DHD_PCIE_SHARED_MEMBER_ADDR(bus, member) \
121 	(bus)->shared_addr + OFFSETOF(pciedev_shared_t, member)
122 
123 /* Fetch address of a member in rings_info_ptr structure in dongle memory */
124 #define DHD_RING_INFO_MEMBER_ADDR(bus, member) \
125 	(bus)->pcie_sh->rings_info_ptr + OFFSETOF(ring_info_t, member)
126 
127 /* Fetch address of a member in the ring_mem structure in dongle memory */
128 #define DHD_RING_MEM_MEMBER_ADDR(bus, ringid, member) \
129 	(bus)->ring_sh[ringid].ring_mem_addr + OFFSETOF(ring_mem_t, member)
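
/*
 * Editor's note: an illustrative sketch (not part of the original source) of
 * how the member-address macros above are combined with dhdpcie_bus_membytes()
 * to read a field out of dongle memory. It assumes the console_addr member of
 * pciedev_shared_t that this driver dereferences elsewhere.
 */
#if 0
static int
example_read_console_addr(dhd_bus_t *bus, uint32 *console_addr)
{
	/* dongle-side address of pciedev_shared_t::console_addr */
	ulong addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, console_addr);

	/* FALSE selects a read; copies 4 bytes of TCM into the host buffer */
	return dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)console_addr,
		sizeof(*console_addr));
}
#endif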
130 
131 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
132 	extern unsigned int system_rev;
133 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
134 
135 /* DHD module parameter */
136 extern uint32 hw_module_variant;
137 
138 #ifdef EWP_EDL
139 extern int host_edl_support;
140 #endif // endif
141 
142 #define D2H_HS_START_STATE (1 << D2H_START_SHIFT)
143 #define D2H_HS_READY_STATE (1 << D2H_START_SHIFT | 1 << D2H_READY_SHIFT)
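
/*
 * Editor's note: an illustrative sketch (not part of the original source) of
 * how the composite handshake states above are compared against the D2H
 * message register; see dhdpcie_dongle_attach() below for the real check.
 */
#if 0
	reg_val = R_REG(osh, &sbpcieregs->u1.dar_64.d2h_msg_reg0);
	if (reg_val == D2H_HS_START_STATE) {
		/* only the START bit set: bootloader handshake has begun */
	} else if (reg_val == D2H_HS_READY_STATE) {
		/* START and READY bits set: bootloader is ready */
	}
#endif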
144 
145 /* This can be overridden by the module parameter (dma_ring_indices) defined in dhd_linux.c */
146 uint dma_ring_indices = 0;
147 /* This can be overridden by the module parameter (h2d_phase) defined in dhd_linux.c */
148 bool h2d_phase = 0;
149 /* This can be overridden by the module parameter (force_trap_bad_h2d_phase)
150  * defined in dhd_linux.c
151  */
152 bool force_trap_bad_h2d_phase = 0;
153 
154 int dhd_dongle_memsize;
155 int dhd_dongle_ramsize;
156 struct dhd_bus *g_dhd_bus = NULL;
157 #ifdef DNGL_AXI_ERROR_LOGGING
158 static void dhd_log_dump_axi_error(uint8 *axi_err);
159 #endif /* DNGL_AXI_ERROR_LOGGING */
160 
161 static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
162 static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
163 #if defined(DHD_FW_COREDUMP)
164 static int dhdpcie_mem_dump(dhd_bus_t *bus);
165 static int dhdpcie_get_mem_dump(dhd_bus_t *bus);
166 #endif /* DHD_FW_COREDUMP */
167 
168 static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
169 static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid,
170 	const char *name, void *params,
171 	int plen, void *arg, int len, int val_size);
172 static int dhdpcie_bus_lpback_req(struct  dhd_bus *bus, uint32 intval);
173 static int dhdpcie_bus_dmaxfer_req(struct  dhd_bus *bus,
174 	uint32 len, uint32 srcdelay, uint32 destdelay,
175 	uint32 d11_lpbk, uint32 core_num, uint32 wait);
176 static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
177 static int dhdpcie_handshake_msg_reg_write(si_t *sih, osl_t *osh, volatile void *addr,
178 	uint *buffer);
179 static int dhdpcie_handshake_msg_reg_read(si_t *sih, osl_t *osh, volatile void *addr,
180 	uint *buffer);
181 static int dhdpcie_dongle_host_handshake_spinwait(si_t *sih, osl_t *osh, volatile void *addr,
182 	uint32 bitshift, uint32 us);
183 static int dhdpcie_dongle_host_get_handshake_address(si_t *sih, osl_t *osh, hs_addrs_t *addr);
184 static int dhdpcie_dongle_host_pre_handshake(si_t *sih, osl_t *osh, hs_addrs_t *addr);
185 static int dhdpcie_dongle_host_post_handshake(si_t *sih, osl_t *osh, hs_addrs_t *addr);
186 static int dhdpcie_dongle_host_chk_validation(si_t *sih, osl_t *osh, hs_addrs_t *addr);
187 int dhdpcie_dongle_host_pre_wd_reset_sequence(si_t *sih, osl_t *osh);
188 int dhdpcie_dongle_host_post_wd_reset_sequence(si_t *sih, osl_t *osh);
189 int dhdpcie_dongle_host_pre_chipid_access_sequence(osl_t *osh, volatile void *regva);
190 static int dhdpcie_dongle_host_post_varswrite(dhd_bus_t *bus, hs_addrs_t *addr);
191 static int _dhdpcie_download_firmware(struct dhd_bus *bus);
192 static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
193 static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
194 static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
195 static bool dhdpci_bus_read_frames(dhd_bus_t *bus);
196 static int dhdpcie_readshared_console(dhd_bus_t *bus);
197 static int dhdpcie_readshared(dhd_bus_t *bus);
198 static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
199 static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
200 static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size);
201 static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
202 	bool dongle_isolation, bool reset_flag);
203 static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
204 static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
205 static void dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr);
206 static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
207 static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
208 static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
209 static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset);
210 static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
211 static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
212 #ifdef DHD_SUPPORT_64BIT
213 static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) __attribute__ ((used));
214 static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) __attribute__ ((used));
215 #endif /* DHD_SUPPORT_64BIT */
216 static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
217 static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size);
218 static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
219 static int dhdpcie_sromotp_customvar(dhd_bus_t *bus, uint32 *customvar1, uint32 *customvar2);
220 static void dhdpcie_fw_trap(dhd_bus_t *bus);
221 static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
222 static void dhdpcie_handle_mb_data(dhd_bus_t *bus);
223 extern void dhd_dpc_enable(dhd_pub_t *dhdp);
224 extern void dhd_dpc_kill(dhd_pub_t *dhdp);
225 
226 #ifdef IDLE_TX_FLOW_MGMT
227 static void dhd_bus_check_idle_scan(dhd_bus_t *bus);
228 static void dhd_bus_idle_scan(dhd_bus_t *bus);
229 #endif /* IDLE_TX_FLOW_MGMT */
230 
231 #ifdef EXYNOS_PCIE_DEBUG
232 extern void exynos_pcie_register_dump(int ch_num);
233 #endif /* EXYNOS_PCIE_DEBUG */
234 
235 #if defined(DHD_H2D_LOG_TIME_SYNC)
236 static void dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus);
237 #endif /* DHD_H2D_LOG_TIME_SYNC */
238 
239 #define     PCI_VENDOR_ID_BROADCOM          0x14e4
240 #define     PCI_VENDOR_ID_CYPRESS           0x12be
241 
242 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
243 #define MAX_D3_ACK_TIMEOUT	100
244 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
245 
246 #define DHD_DEFAULT_DOORBELL_TIMEOUT 200	/* ms */
247 static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version);
248 static int dhdpcie_cto_error_recovery(struct dhd_bus *bus);
249 
250 static int dhdpcie_init_d11status(struct dhd_bus *bus);
251 
252 static int dhdpcie_wrt_rnd(struct dhd_bus *bus);
253 
254 extern uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd);
255 extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost);
256 
257 #ifdef DHD_HP2P
258 extern enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer);
259 static uint16 dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val);
260 #endif // endif
261 #define NUM_PATTERNS 2
262 static bool dhd_bus_tcm_test(struct dhd_bus *bus);
263 
264 /* IOVar table */
265 enum {
266 	IOV_INTR = 1,
267 #ifdef DHD_BUS_MEM_ACCESS
268 	IOV_MEMBYTES,
269 #endif /* DHD_BUS_MEM_ACCESS */
270 	IOV_MEMSIZE,
271 	IOV_SET_DOWNLOAD_STATE,
272 	IOV_DEVRESET,
273 	IOV_VARS,
274 	IOV_MSI_SIM,
275 	IOV_PCIE_LPBK,
276 	IOV_CC_NVMSHADOW,
277 	IOV_RAMSIZE,
278 	IOV_RAMSTART,
279 	IOV_SLEEP_ALLOWED,
280 	IOV_PCIE_DMAXFER,
281 	IOV_PCIE_SUSPEND,
282 #ifdef DHD_PCIE_REG_ACCESS
283 	IOV_PCIEREG,
284 	IOV_PCIECFGREG,
285 	IOV_PCIECOREREG,
286 	IOV_PCIESERDESREG,
287 	IOV_PCIEASPM,
288 	IOV_BAR0_SECWIN_REG,
289 	IOV_SBREG,
290 #endif /* DHD_PCIE_REG_ACCESS */
291 	IOV_DONGLEISOLATION,
292 	IOV_LTRSLEEPON_UNLOOAD,
293 	IOV_METADATA_DBG,
294 	IOV_RX_METADATALEN,
295 	IOV_TX_METADATALEN,
296 	IOV_TXP_THRESHOLD,
297 	IOV_BUZZZ_DUMP,
298 	IOV_DUMP_RINGUPD_BLOCK,
299 	IOV_DMA_RINGINDICES,
300 	IOV_FORCE_FW_TRAP,
301 	IOV_DB1_FOR_MB,
302 	IOV_FLOW_PRIO_MAP,
303 #ifdef DHD_PCIE_RUNTIMEPM
304 	IOV_IDLETIME,
305 #endif /* DHD_PCIE_RUNTIMEPM */
306 	IOV_RXBOUND,
307 	IOV_TXBOUND,
308 	IOV_HANGREPORT,
309 	IOV_H2D_MAILBOXDATA,
310 	IOV_INFORINGS,
311 	IOV_H2D_PHASE,
312 	IOV_H2D_ENABLE_TRAP_BADPHASE,
313 	IOV_H2D_TXPOST_MAX_ITEM,
314 	IOV_TRAPDATA,
315 	IOV_TRAPDATA_RAW,
316 	IOV_CTO_PREVENTION,
317 	IOV_PCIE_WD_RESET,
318 	IOV_DUMP_DONGLE,
319 	IOV_HWA_ENAB_BMAP,
320 	IOV_IDMA_ENABLE,
321 	IOV_IFRM_ENABLE,
322 	IOV_CLEAR_RING,
323 	IOV_DAR_ENABLE,
324 	IOV_DNGL_CAPS,   /**< returns string with dongle capabilities */
325 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
326 	IOV_GDB_SERVER,  /**< starts gdb server on given interface */
327 #endif /* DEBUGGER || DHD_DSCOPE */
328 	IOV_INB_DW_ENABLE,
329 	IOV_CTO_THRESHOLD,
330 	IOV_HSCBSIZE, /* get HSCB buffer size */
331 #ifdef DHD_BUS_MEM_ACCESS
332 	IOV_HSCBBYTES, /* copy HSCB buffer */
333 #endif // endif
334 	IOV_HP2P_ENABLE,
335 	IOV_HP2P_PKT_THRESHOLD,
336 	IOV_HP2P_TIME_THRESHOLD,
337 	IOV_HP2P_PKT_EXPIRY,
338 	IOV_HP2P_TXCPL_MAXITEMS,
339 	IOV_HP2P_RXCPL_MAXITEMS,
340 	IOV_EXTDTXS_IN_TXCPL,
341 	IOV_HOSTRDY_AFTER_INIT,
342 	IOV_PCIE_LAST /**< unused IOVAR */
343 };
344 
345 const bcm_iovar_t dhdpcie_iovars[] = {
346 	{"intr",	IOV_INTR,	0, 	0, IOVT_BOOL,	0 },
347 #ifdef DHD_BUS_MEM_ACCESS
348 	{"membytes",	IOV_MEMBYTES,	0, 	0, IOVT_BUFFER,	2 * sizeof(int) },
349 #endif /* DHD_BUS_MEM_ACCESS */
350 	{"memsize",	IOV_MEMSIZE,	0, 	0, IOVT_UINT32,	0 },
351 	{"dwnldstate",	IOV_SET_DOWNLOAD_STATE,	0, 	0, IOVT_BOOL,	0 },
352 	{"vars",	IOV_VARS,	0, 	0, IOVT_BUFFER,	0 },
353 	{"devreset",	IOV_DEVRESET,	0, 	0, IOVT_UINT8,	0 },
354 	{"pcie_device_trap", IOV_FORCE_FW_TRAP, 0, 	0, 0,	0 },
355 	{"pcie_lpbk",	IOV_PCIE_LPBK,	0,	0, IOVT_UINT32,	0 },
356 	{"cc_nvmshadow", IOV_CC_NVMSHADOW, 0,	0, IOVT_BUFFER, 0 },
357 	{"ramsize",	IOV_RAMSIZE,	0, 	0, IOVT_UINT32,	0 },
358 	{"ramstart",	IOV_RAMSTART,	0, 	0, IOVT_UINT32,	0 },
359 #ifdef DHD_PCIE_REG_ACCESS
360 	{"pciereg",	IOV_PCIEREG,	0, 	0, IOVT_BUFFER,	2 * sizeof(int32) },
361 	{"pciecfgreg",	IOV_PCIECFGREG,	0, 	0, IOVT_BUFFER,	2 * sizeof(int32) },
362 	{"pciecorereg",	IOV_PCIECOREREG,	0, 	0, IOVT_BUFFER,	2 * sizeof(int32) },
363 	{"pcieserdesreg",	IOV_PCIESERDESREG,	0, 	0, IOVT_BUFFER,	3 * sizeof(int32) },
364 	{"bar0secwinreg",	IOV_BAR0_SECWIN_REG,	0, 	0, IOVT_BUFFER,	sizeof(sdreg_t) },
365 	{"sbreg",	IOV_SBREG,	0,	0, IOVT_BUFFER,	sizeof(uint8) },
366 #endif /* DHD_PCIE_REG_ACCESS */
367 	{"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, sizeof(dma_xfer_info_t)},
368 	{"pcie_suspend", IOV_PCIE_SUSPEND,	DHD_IOVF_PWRREQ_BYPASS,	0, IOVT_UINT32,	0 },
369 	{"sleep_allowed",	IOV_SLEEP_ALLOWED,	0,	0, IOVT_BOOL,	0 },
370 	{"dngl_isolation", IOV_DONGLEISOLATION,	0, 	0, IOVT_UINT32,	0 },
371 	{"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD,	0,	0, IOVT_UINT32,	0 },
372 	{"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK,	0, 	0, IOVT_BUFFER,	0 },
373 	{"dma_ring_indices", IOV_DMA_RINGINDICES,	0, 	0, IOVT_UINT32,	0},
374 	{"metadata_dbg", IOV_METADATA_DBG,	0,	0, IOVT_BOOL,	0 },
375 	{"rx_metadata_len", IOV_RX_METADATALEN,	0, 	0, IOVT_UINT32,	0 },
376 	{"tx_metadata_len", IOV_TX_METADATALEN,	0, 	0, IOVT_UINT32,	0 },
377 	{"db1_for_mb", IOV_DB1_FOR_MB,	0, 	0, IOVT_UINT32,	0 },
378 	{"txp_thresh", IOV_TXP_THRESHOLD,	0,	0, IOVT_UINT32,	0 },
379 	{"buzzz_dump", IOV_BUZZZ_DUMP,		0, 	0, IOVT_UINT32,	0 },
380 	{"flow_prio_map", IOV_FLOW_PRIO_MAP,	0, 	0, IOVT_UINT32,	0 },
381 #ifdef DHD_PCIE_RUNTIMEPM
382 	{"idletime",    IOV_IDLETIME,   0,	0, IOVT_INT32,     0 },
383 #endif /* DHD_PCIE_RUNTIMEPM */
384 	{"rxbound",     IOV_RXBOUND,    0, 0,	IOVT_UINT32,    0 },
385 	{"txbound",     IOV_TXBOUND,    0, 0,	IOVT_UINT32,    0 },
386 #ifdef DHD_PCIE_REG_ACCESS
387 	{"aspm", IOV_PCIEASPM, 0, 0, IOVT_INT32, 0 },
388 #endif /* DHD_PCIE_REG_ACCESS */
389 	{"fw_hang_report", IOV_HANGREPORT,	0, 0,	IOVT_BOOL,	0 },
390 	{"h2d_mb_data",     IOV_H2D_MAILBOXDATA,    0, 0,      IOVT_UINT32,    0 },
391 	{"inforings",   IOV_INFORINGS,    0, 0,      IOVT_UINT32,    0 },
392 	{"h2d_phase",   IOV_H2D_PHASE,    0, 0,      IOVT_UINT32,    0 },
393 	{"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE,    0, 0,
394 	IOVT_UINT32,    0 },
395 	{"h2d_max_txpost",   IOV_H2D_TXPOST_MAX_ITEM,    0, 0,      IOVT_UINT32,    0 },
396 	{"trap_data",	IOV_TRAPDATA,	0, 0,	IOVT_BUFFER,	0 },
397 	{"trap_data_raw",	IOV_TRAPDATA_RAW,	0, 0,	IOVT_BUFFER,	0 },
398 	{"cto_prevention",	IOV_CTO_PREVENTION,	0, 0,	IOVT_UINT32,	0 },
399 	{"pcie_wd_reset",	IOV_PCIE_WD_RESET,	0,	0, IOVT_BOOL,	0 },
400 	{"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
401 	MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t))},
402 	{"clear_ring",   IOV_CLEAR_RING,    0, 0,  IOVT_UINT32,    0 },
403 	{"hwa_enab_bmap",   IOV_HWA_ENAB_BMAP,    0, 0,  IOVT_UINT32,    0 },
404 	{"idma_enable",   IOV_IDMA_ENABLE,    0, 0,  IOVT_UINT32,    0 },
405 	{"ifrm_enable",   IOV_IFRM_ENABLE,    0, 0,  IOVT_UINT32,    0 },
406 	{"dar_enable",   IOV_DAR_ENABLE,    0, 0,  IOVT_UINT32,    0 },
407 	{"cap", IOV_DNGL_CAPS,	0, 0, IOVT_BUFFER,	0},
408 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
409 	{"gdb_server", IOV_GDB_SERVER,    0, 0,      IOVT_UINT32,    0 },
410 #endif /* DEBUGGER || DHD_DSCOPE */
411 	{"inb_dw_enable",   IOV_INB_DW_ENABLE,    0, 0,  IOVT_UINT32,    0 },
412 	{"cto_threshold",	IOV_CTO_THRESHOLD,	0,	0, IOVT_UINT32,	0 },
413 	{"hscbsize",	IOV_HSCBSIZE,	0,	0,	IOVT_UINT32,	0 },
414 #ifdef DHD_BUS_MEM_ACCESS
415 	{"hscbbytes",	IOV_HSCBBYTES,	0,	0,	IOVT_BUFFER,	2 * sizeof(int32) },
416 #endif // endif
417 #ifdef DHD_HP2P
418 	{"hp2p_enable", IOV_HP2P_ENABLE,	0,	0, IOVT_UINT32,	0 },
419 	{"hp2p_pkt_thresh", IOV_HP2P_PKT_THRESHOLD,	0,	0, IOVT_UINT32,	0 },
420 	{"hp2p_time_thresh", IOV_HP2P_TIME_THRESHOLD,	0,	0, IOVT_UINT32,	0 },
421 	{"hp2p_pkt_expiry", IOV_HP2P_PKT_EXPIRY,	0,	0, IOVT_UINT32,	0 },
422 	{"hp2p_txcpl_maxitems", IOV_HP2P_TXCPL_MAXITEMS,	0,	0, IOVT_UINT32,	0 },
423 	{"hp2p_rxcpl_maxitems", IOV_HP2P_RXCPL_MAXITEMS,	0,	0, IOVT_UINT32,	0 },
424 #endif // endif
425 	{"extdtxs_in_txcpl", IOV_EXTDTXS_IN_TXCPL,	0,	0, IOVT_UINT32,	0 },
426 	{"hostrdy_after_init", IOV_HOSTRDY_AFTER_INIT,	0,	0, IOVT_UINT32,	0 },
427 	{NULL, 0, 0, 0, 0, 0 }
428 };
429 
430 #ifdef BCMQT
431 #define MAX_READ_TIMEOUT	(200 * 1000 * 1000)
432 #else
433 #define MAX_READ_TIMEOUT	(5 * 1000 * 1000)
434 #endif // endif
435 
436 #ifndef DHD_RXBOUND
437 #define DHD_RXBOUND		64
438 #endif // endif
439 #ifndef DHD_TXBOUND
440 #define DHD_TXBOUND		64
441 #endif // endif
442 
443 #define DHD_INFORING_BOUND	32
444 #define DHD_BTLOGRING_BOUND	32
445 
446 uint dhd_rxbound = DHD_RXBOUND;
447 uint dhd_txbound = DHD_TXBOUND;
448 
449 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
450 /** the GDB debugger layer will call back into this (bus) layer to read/write dongle memory */
451 static struct dhd_gdb_bus_ops_s  bus_ops = {
452 	.read_u16 = dhdpcie_bus_rtcm16,
453 	.read_u32 = dhdpcie_bus_rtcm32,
454 	.write_u32 = dhdpcie_bus_wtcm32,
455 };
456 #endif /* DEBUGGER || DHD_DSCOPE */
457 
458 bool
459 dhd_bus_get_flr_force_fail(struct dhd_bus *bus)
460 {
461 	return bus->flr_force_fail;
462 }
463 
464 /**
465  * Register/Unregister functions are called by the main DHD entry point (eg module insertion) to
466  * link with the bus driver, in order to look for or await the device.
467  */
468 int
469 dhd_bus_register(void)
470 {
471 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
472 
473 	return dhdpcie_bus_register();
474 }
475 
476 void
477 dhd_bus_unregister(void)
478 {
479 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
480 
481 	dhdpcie_bus_unregister();
482 	return;
483 }
484 
485 /** returns a host virtual address */
486 uint32 *
487 dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
488 {
489 	return (uint32 *)REG_MAP(addr, size);
490 }
491 
492 void
493 dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size)
494 {
495 	REG_UNMAP(addr);
496 	return;
497 }
498 
499 /**
500  * Return the H2D doorbell register address.
501  * Use DAR registers instead of the enum register for corerev >= 23 (4347B0).
502  */
503 static INLINE uint
504 dhd_bus_db0_addr_get(struct dhd_bus *bus)
505 {
506 	uint addr = PCIH2D_MailBox;
507 	uint dar_addr = DAR_PCIH2D_DB0_0(bus->sih->buscorerev);
508 
509 	return ((DAR_ACTIVE(bus->dhd)) ? dar_addr : addr);
510 }
511 
512 static INLINE uint
513 dhd_bus_db0_addr_2_get(struct dhd_bus *bus)
514 {
515 	return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB2_0(bus->sih->buscorerev) : PCIH2D_MailBox_2);
516 }
517 
518 static INLINE uint
519 dhd_bus_db1_addr_get(struct dhd_bus *bus)
520 {
521 	return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB0_1(bus->sih->buscorerev) : PCIH2D_DB1);
522 }
523 
524 static INLINE uint
525 dhd_bus_db1_addr_1_get(struct dhd_bus *bus)
526 {
527 	return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB1_1(bus->sih->buscorerev) : PCIH2D_DB1_1);
528 }
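
/*
 * Editor's note: an illustrative sketch (not part of the original source) of
 * ringing the H2D doorbell through the address helpers above; the value
 * written (0x12345678 here) is just a placeholder for whatever the protocol
 * layer posts.
 */
#if 0
	si_corereg(bus->sih, bus->sih->buscoreidx,
		dhd_bus_db0_addr_get(bus), ~0, 0x12345678);
#endif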
529 
530 /*
531  * WAR for SWWLAN-215055 - [4378B0] ARM fails to boot without DAR WL domain request
532  */
533 static INLINE void
534 dhd_bus_pcie_pwr_req_wl_domain(struct dhd_bus *bus, bool enable)
535 {
536 	if (enable) {
537 		si_corereg(bus->sih, bus->sih->buscoreidx,
538 			DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev),
539 			SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT,
540 			SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT);
541 	} else {
542 		si_corereg(bus->sih, bus->sih->buscoreidx,
543 			DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev),
544 			SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT, 0);
545 	}
546 }
547 
548 static INLINE void
549 _dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus *bus)
550 {
551 	uint mask;
552 
553 	/*
554 	 * If multiple de-asserts, decrement ref and return
555 	 * Clear power request when only one pending
556 	 * so initial request is not removed unexpectedly
557 	 */
558 	if (bus->pwr_req_ref > 1) {
559 		bus->pwr_req_ref--;
560 		return;
561 	}
562 
563 	ASSERT(bus->pwr_req_ref == 1);
564 
565 	if (MULTIBP_ENAB(bus->sih)) {
566 		/* Common BP controlled by HW so only need to toggle WL/ARM backplane */
567 		mask = SRPWR_DMN1_ARMBPSD_MASK;
568 	} else {
569 		mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
570 	}
571 
572 	si_srpwr_request(bus->sih, mask, 0);
573 	bus->pwr_req_ref = 0;
574 }
575 
576 static INLINE void
577 dhd_bus_pcie_pwr_req_clear(struct dhd_bus *bus)
578 {
579 	unsigned long flags = 0;
580 
581 	DHD_GENERAL_LOCK(bus->dhd, flags);
582 	_dhd_bus_pcie_pwr_req_clear_cmn(bus);
583 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
584 }
585 
586 static INLINE void
587 dhd_bus_pcie_pwr_req_clear_nolock(struct dhd_bus *bus)
588 {
589 	_dhd_bus_pcie_pwr_req_clear_cmn(bus);
590 }
591 
592 static INLINE void
593 _dhd_bus_pcie_pwr_req_cmn(struct dhd_bus *bus)
594 {
595 	uint mask, val;
596 
597 	/* If multiple request entries, increment reference and return */
598 	if (bus->pwr_req_ref > 0) {
599 		bus->pwr_req_ref++;
600 		return;
601 	}
602 
603 	ASSERT(bus->pwr_req_ref == 0);
604 
605 	if (MULTIBP_ENAB(bus->sih)) {
606 		/* Common BP controlled by HW so only need to toggle WL/ARM backplane */
607 		mask = SRPWR_DMN1_ARMBPSD_MASK;
608 		val = SRPWR_DMN1_ARMBPSD_MASK;
609 	} else {
610 		mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
611 		val = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
612 	}
613 
614 	si_srpwr_request(bus->sih, mask, val);
615 
616 	bus->pwr_req_ref = 1;
617 }
618 
619 static INLINE void
620 dhd_bus_pcie_pwr_req(struct dhd_bus *bus)
621 {
622 	unsigned long flags = 0;
623 
624 	DHD_GENERAL_LOCK(bus->dhd, flags);
625 	_dhd_bus_pcie_pwr_req_cmn(bus);
626 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
627 }
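
/*
 * Editor's note: an illustrative sketch (not part of the original source) of
 * the reference-counted request/clear pairing. Nested requests only bump the
 * refcount; the SRPWR register is written on the first request and the last
 * clear, so callers simply bracket their backplane accesses.
 */
#if 0
	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req(bus);
	}
	/* ... access ARM/WL backplane registers here ... */
	if (MULTIBP_ENAB(bus->sih)) {
		dhd_bus_pcie_pwr_req_clear(bus);
	}
#endif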
628 
629 static INLINE void
630 _dhd_bus_pcie_pwr_req_pd0123_cmn(struct dhd_bus *bus)
631 {
632 	uint mask, val;
633 
634 	mask = SRPWR_DMN_ALL_MASK(bus->sih);
635 	val = SRPWR_DMN_ALL_MASK(bus->sih);
636 
637 	si_srpwr_request(bus->sih, mask, val);
638 }
639 
640 static INLINE void
641 dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus *bus)
642 {
643 	unsigned long flags = 0;
644 
645 	DHD_GENERAL_LOCK(bus->dhd, flags);
646 	_dhd_bus_pcie_pwr_req_pd0123_cmn(bus);
647 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
648 }
649 
650 static INLINE void
651 _dhd_bus_pcie_pwr_req_clear_pd0123_cmn(struct dhd_bus *bus)
652 {
653 	uint mask;
654 
655 	mask = SRPWR_DMN_ALL_MASK(bus->sih);
656 
657 	si_srpwr_request(bus->sih, mask, 0);
658 }
659 
660 static INLINE void
661 dhd_bus_pcie_pwr_req_clear_reload_war(struct dhd_bus *bus)
662 {
663 	unsigned long flags = 0;
664 
665 	DHD_GENERAL_LOCK(bus->dhd, flags);
666 	_dhd_bus_pcie_pwr_req_clear_pd0123_cmn(bus);
667 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
668 }
669 
670 static INLINE void
671 dhd_bus_pcie_pwr_req_nolock(struct dhd_bus *bus)
672 {
673 	_dhd_bus_pcie_pwr_req_cmn(bus);
674 }
675 
676 bool
677 dhdpcie_chip_support_msi(dhd_bus_t *bus)
678 {
679 	DHD_ERROR(("%s: buscorerev=%d chipid=0x%x\n",
680 		__FUNCTION__, bus->sih->buscorerev, si_chipid(bus->sih)));
681 	if (bus->sih->buscorerev <= 14 ||
682 		si_chipid(bus->sih) == BCM4375_CHIP_ID ||
683 		si_chipid(bus->sih) == BCM4362_CHIP_ID ||
684 		si_chipid(bus->sih) == BCM43751_CHIP_ID ||
685 		si_chipid(bus->sih) == BCM4361_CHIP_ID ||
686 		si_chipid(bus->sih) == CYW55560_CHIP_ID) {
687 		return FALSE;
688 	} else {
689 		return TRUE;
690 	}
691 }
692 
693 /**
694  * Called once for each hardware (dongle) instance that this DHD manages.
695  *
696  * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
697  * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
698  * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
699  *
700  * 'tcm' is the *host* virtual address at which tcm is mapped.
701  */
702 int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr,
703 	volatile char *regs, volatile char *tcm, void *pci_dev)
704 {
705 	dhd_bus_t *bus = NULL;
706 	int ret = BCME_OK;
707 	/* customvar1 and customvar2 are customer configurable CIS tuples in OTP.
708 	* In dual chip (PCIE) scenario, customvar2 is used as a hint to detect
709 	* the chip variants and load the right firmware and NVRAM
710 	*/
711 	/* Below vars are set to 0x0 because an OTPed value cannot be 0x0 */
712 	uint32 customvar1 = 0x0;
713 	uint32 customvar2 = 0x0;
714 	uint32 otp_hw_module_variant = 0x0;
715 	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
716 
717 	do {
718 		if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) {
719 			DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
720 			ret = BCME_NORESOURCE;
721 			break;
722 		}
723 
724 		bus->regs = regs;
725 		bus->tcm = tcm;
726 		bus->osh = osh;
727 		/* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
728 		bus->dev = (struct pci_dev *)pci_dev;
729 
730 		dll_init(&bus->flowring_active_list);
731 #ifdef IDLE_TX_FLOW_MGMT
732 		bus->active_list_last_process_ts = OSL_SYSUPTIME();
733 #endif /* IDLE_TX_FLOW_MGMT */
734 
735 		/* Attach pcie shared structure */
736 		if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) {
737 			DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
738 			ret = BCME_NORESOURCE;
739 			break;
740 		}
741 
742 		/* dhd_common_init(osh); */
743 
744 		if (dhdpcie_dongle_attach(bus)) {
745 			DHD_ERROR(("%s: dhdpcie_dongle_attach failed\n", __FUNCTION__));
746 			ret = BCME_NOTREADY;
747 			break;
748 		}
749 
750 		if (!hw_module_variant) {
751 			/* For single wifi module */
752 			goto enumerate_module;
753 		}
754 
755 		/* read otp variable customvar and store in dhd->customvar1 and dhd->customvar2 */
756 		if (dhdpcie_sromotp_customvar(bus, &customvar1, &customvar2)) {
757 			DHD_ERROR(("%s: dhdpcie_sromotp_customvar failed\n", __FUNCTION__));
758 			break;
759 		}
760 		if (!customvar2) {
761 			DHD_ERROR(("%s: customvar2 is not OTPed, "
762 				   "hw_module_variant=0x%x\n",
763 				   __FUNCTION__, hw_module_variant));
764 			goto enumerate_module;
765 		}
766 		/* customvar2=0xNNMMLLKK, LL is module variant */
767 		otp_hw_module_variant = (customvar2 >> 8) & 0xFF;
768 		DHD_TRACE(("%s: hw_module_variant=0x%x and "
769 			"OTPed-module_variant=0x%x\n", __FUNCTION__,
770 			 hw_module_variant, otp_hw_module_variant));
771 		if (hw_module_variant != otp_hw_module_variant) {
772 			DHD_ERROR(("%s: Not going to enumerate this module as "
773 				"hw_module_variant=0x%x and "
774 				"OTPed-module_variant=0x%x didn't match\n",
775 				__FUNCTION__, hw_module_variant, otp_hw_module_variant));
776 			break;
777 		}
778 		DHD_TRACE(("%s: Going to enumerate this module as "
779 			"hw_module_variant=0x%x and "
780 			"OTPed-module_variant=0x%x match\n",
781 			__FUNCTION__, hw_module_variant, otp_hw_module_variant));
782 enumerate_module:
783 		/* software resources */
784 		if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
785 			DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
786 			ret = BCME_ERROR;
787 			break;
788 		}
789 
790 		DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
791 		bus->dhd->busstate = DHD_BUS_DOWN;
792 		bus->dhd->hostrdy_after_init = TRUE;
793 		bus->db1_for_mb = TRUE;
794 		bus->dhd->hang_report = TRUE;
795 		bus->use_mailbox = FALSE;
796 		bus->use_d0_inform = FALSE;
797 		bus->intr_enabled = FALSE;
798 		bus->flr_force_fail = FALSE;
799 		/* By default disable HWA and enable it via iovar */
800 		bus->hwa_enab_bmap = 0;
801 		/* update the dma indices if set through module parameter. */
802 		if (dma_ring_indices != 0) {
803 			dhdpcie_set_dma_ring_indices(bus->dhd, dma_ring_indices);
804 		}
805 		/* update h2d phase support if set through module parameter */
806 		bus->dhd->h2d_phase_supported = h2d_phase ? TRUE : FALSE;
807 		/* update force trap on bad phase if set through module parameter */
808 		bus->dhd->force_dongletrap_on_bad_h2d_phase =
809 			force_trap_bad_h2d_phase ? TRUE : FALSE;
810 #ifdef IDLE_TX_FLOW_MGMT
811 		bus->enable_idle_flowring_mgmt = FALSE;
812 #endif /* IDLE_TX_FLOW_MGMT */
813 		bus->irq_registered = FALSE;
814 
815 #ifdef DHD_MSI_SUPPORT
816 #ifdef DHD_FORCE_MSI
817 		bus->d2h_intr_method = PCIE_MSI;
818 #else
819 		bus->d2h_intr_method = enable_msi && dhdpcie_chip_support_msi(bus) ?
820 			PCIE_MSI : PCIE_INTX;
821 #endif /* DHD_FORCE_MSI */
822 #else
823 		bus->d2h_intr_method = PCIE_INTX;
824 #endif /* DHD_MSI_SUPPORT */
825 
826 #ifdef DHD_HP2P
827 		bus->hp2p_txcpl_max_items = DHD_MAX_ITEMS_HPP_TXCPL_RING;
828 		bus->hp2p_rxcpl_max_items = DHD_MAX_ITEMS_HPP_RXCPL_RING;
829 #endif /* DHD_HP2P */
830 
831 		DHD_TRACE(("%s: EXIT SUCCESS\n",
832 			__FUNCTION__));
833 		g_dhd_bus = bus;
834 		*bus_ptr = bus;
835 		return ret;
836 	} while (0);
837 
838 	DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
839 
840 	if (bus && bus->pcie_sh) {
841 		MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
842 	}
843 
844 	if (bus) {
845 		MFREE(osh, bus, sizeof(dhd_bus_t));
846 	}
847 
848 	return ret;
849 }
850 
851 bool
852 dhd_bus_skip_clm(dhd_pub_t *dhdp)
853 {
854 	switch (dhd_bus_chip_id(dhdp)) {
855 		case BCM4369_CHIP_ID:
856 			return TRUE;
857 		default:
858 			return FALSE;
859 	}
860 }
861 
862 uint
863 dhd_bus_chip(struct dhd_bus *bus)
864 {
865 	ASSERT(bus->sih != NULL);
866 	return bus->sih->chip;
867 }
868 
869 uint
870 dhd_bus_chiprev(struct dhd_bus *bus)
871 {
872 	ASSERT(bus);
873 	ASSERT(bus->sih != NULL);
874 	return bus->sih->chiprev;
875 }
876 
877 void *
878 dhd_bus_pub(struct dhd_bus *bus)
879 {
880 	return bus->dhd;
881 }
882 
883 void *
884 dhd_bus_sih(struct dhd_bus *bus)
885 {
886 	return (void *)bus->sih;
887 }
888 
889 void *
890 dhd_bus_txq(struct dhd_bus *bus)
891 {
892 	return &bus->txq;
893 }
894 
895 /** Get Chip ID version */
896 uint dhd_bus_chip_id(dhd_pub_t *dhdp)
897 {
898 	dhd_bus_t *bus = dhdp->bus;
899 	return  bus->sih->chip;
900 }
901 
902 /** Get Chip Rev ID version */
903 uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
904 {
905 	dhd_bus_t *bus = dhdp->bus;
906 	return bus->sih->chiprev;
907 }
908 
909 /** Get Chip Pkg ID version */
910 uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
911 {
912 	dhd_bus_t *bus = dhdp->bus;
913 	return bus->sih->chippkg;
914 }
915 
916 /** Conduct Loopback test */
917 int
918 dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type)
919 {
920 	dma_xfer_info_t dmaxfer_lpbk;
921 	int ret = BCME_OK;
922 
923 #define PCIE_DMAXFER_LPBK_LENGTH	4096
924 	memset(&dmaxfer_lpbk, 0, sizeof(dma_xfer_info_t));
925 	dmaxfer_lpbk.version = DHD_DMAXFER_VERSION;
926 	dmaxfer_lpbk.length = (uint16)sizeof(dma_xfer_info_t);
927 	dmaxfer_lpbk.num_bytes = PCIE_DMAXFER_LPBK_LENGTH;
928 	dmaxfer_lpbk.type = type;
929 	dmaxfer_lpbk.should_wait = TRUE;
930 
931 	ret = dhd_bus_iovar_op(dhdp, "pcie_dmaxfer", NULL, 0,
932 		(char *)&dmaxfer_lpbk, sizeof(dma_xfer_info_t), IOV_SET);
933 	if (ret < 0) {
934 		DHD_ERROR(("failed to start PCIe Loopback Test!!! "
935 			"Type:%d Reason:%d\n", type, ret));
936 		return ret;
937 	}
938 
939 	if (dmaxfer_lpbk.status != DMA_XFER_SUCCESS) {
940 		DHD_ERROR(("PCIe Loopback Test failed!!! "
941 			"Type:%d Status:%d Error code:%d\n", type,
942 			dmaxfer_lpbk.status, dmaxfer_lpbk.error_code));
943 		ret = BCME_ERROR;
944 	} else {
945 		DHD_ERROR(("PCIe Loopback Test passed!"
946 			" Type:%d\n", type));
947 	}
948 #undef PCIE_DMAXFER_LPBK_LENGTH
949 
950 	return ret;
951 }
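
/*
 * Editor's note: an illustrative sketch (not part of the original source) of
 * kicking off a DMA loopback test from a caller holding a dhd_pub_t. The
 * type argument selects the loopback variant consumed by the "pcie_dmaxfer"
 * iovar; 0 is assumed here to mean a plain memory-to-memory transfer.
 */
#if 0
	int err = dhd_bus_dmaxfer_lpbk(dhdp, 0);
	if (err != BCME_OK) {
		DHD_ERROR(("DMA loopback failed: %d\n", err));
	}
#endif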
952 
953 /* Log the latest DPC schedule time */
954 void
955 dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp)
956 {
957 	dhdp->bus->dpc_sched_time = OSL_LOCALTIME_NS();
958 }
959 
960 /* Check if there are DPC scheduling errors */
961 bool
962 dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp)
963 {
964 	dhd_bus_t *bus = dhdp->bus;
965 	bool sched_err;
966 
967 	if (bus->dpc_entry_time < bus->isr_exit_time) {
968 		/* Kernel doesn't schedule the DPC after processing PCIe IRQ */
969 		sched_err = TRUE;
970 	} else if (bus->dpc_entry_time < bus->resched_dpc_time) {
971 		/* Kernel doesn't schedule the DPC after DHD tries to reschedule
972 		 * the DPC due to pending work items to be processed.
973 		 */
974 		sched_err = TRUE;
975 	} else {
976 		sched_err = FALSE;
977 	}
978 
979 	if (sched_err) {
980 		/* print out minimum timestamp info */
981 		DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
982 			" isr_exit_time="SEC_USEC_FMT
983 			" dpc_entry_time="SEC_USEC_FMT
984 			"\ndpc_exit_time="SEC_USEC_FMT
985 			" dpc_sched_time="SEC_USEC_FMT
986 			" resched_dpc_time="SEC_USEC_FMT"\n",
987 			GET_SEC_USEC(bus->isr_entry_time),
988 			GET_SEC_USEC(bus->isr_exit_time),
989 			GET_SEC_USEC(bus->dpc_entry_time),
990 			GET_SEC_USEC(bus->dpc_exit_time),
991 			GET_SEC_USEC(bus->dpc_sched_time),
992 			GET_SEC_USEC(bus->resched_dpc_time)));
993 	}
994 
995 	return sched_err;
996 }
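
/*
 * Editor's note: a worked example (not part of the original source). If
 * isr_exit_time = 10.000200 s but dpc_entry_time = 10.000100 s, the DPC last
 * entered *before* the most recent ISR completed, i.e. the kernel never ran
 * the DPC for that interrupt, so the first branch above reports TRUE and the
 * timestamps are dumped.
 */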
997 
998 /** Read and clear intstatus. This should be called with interrupts disabled or inside isr */
999 uint32
1000 dhdpcie_bus_intstatus(dhd_bus_t *bus)
1001 {
1002 	uint32 intstatus = 0;
1003 	uint32 intmask = 0;
1004 
1005 	if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
1006 		DHD_ERROR(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__));
1007 		return intstatus;
1008 	}
1009 	if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
1010 		(bus->sih->buscorerev == 2)) {
1011 		intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
1012 		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
1013 		intstatus &= I_MB;
1014 	} else {
1015 		/* this is a PCIe core register, not a config register */
1016 		intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
1017 
1018 		/* this is a PCIe core register, not a config register */
1019 		intmask = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, 0, 0);
1020 		/* If the device was removed, intstatus and intmask read back as 0xffffffff */
1021 		if (intstatus == (uint32)-1 || intmask == (uint32)-1) {
1022 			DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__));
1023 			DHD_ERROR(("%s: INTSTAT : 0x%x INTMASK : 0x%x.\n",
1024 			    __FUNCTION__, intstatus, intmask));
1025 			bus->is_linkdown = TRUE;
1026 			dhd_pcie_debug_info_dump(bus->dhd);
1027 #ifdef CUSTOMER_HW4_DEBUG
1028 #if defined(OEM_ANDROID)
1029 #ifdef SUPPORT_LINKDOWN_RECOVERY
1030 #ifdef CONFIG_ARCH_MSM
1031 			bus->no_cfg_restore = 1;
1032 #endif /* CONFIG_ARCH_MSM */
1033 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1034 			bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN_EP_DETECT;
1035 			dhd_os_send_hang_message(bus->dhd);
1036 #endif /* OEM_ANDROID */
1037 #endif /* CUSTOMER_HW4_DEBUG */
1038 			return intstatus;
1039 		}
1040 
1041 		intstatus &= intmask;
1042 
1043 		/*
1044 		 * The fourth argument to si_corereg is the "mask" of register fields to update
1045 		 * and the fifth argument is the "value" to write. If we are interested in only
1046 		 * a few fields of the "mask" bitmap, we should not write back everything we read:
1047 		 * doing so might clear/ack interrupts that have not been handled yet.
1048 		 */
1049 		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
1050 			intstatus);
1051 
1052 		intstatus &= bus->def_intmask;
1053 	}
1054 
1055 	return intstatus;
1056 }
1057 
1058 void
1059 dhdpcie_cto_recovery_handler(dhd_pub_t *dhd)
1060 {
1061 	dhd_bus_t *bus = dhd->bus;
1062 	int ret;
1063 
1064 	/* Disable PCIe Runtime PM to avoid D3_ACK timeout.
1065 	 */
1066 	DHD_DISABLE_RUNTIME_PM(dhd);
1067 
1068 	/* Sleep for 1 second so that any AXI timeout,
1069 	 * if running on the ALP clock, is also captured
1070 	 */
1071 	OSL_SLEEP(1000);
1072 
1073 	/* reset backplane and cto,
1074 	 * then access through pcie is recovered.
1075 	 */
1076 	ret = dhdpcie_cto_error_recovery(bus);
1077 	if (!ret) {
1078 		/* Waiting for backplane reset */
1079 		OSL_SLEEP(10);
1080 		/* Dump debug Info */
1081 		dhd_prot_debug_info_print(bus->dhd);
1082 		/* Dump console buffer */
1083 		dhd_bus_dump_console_buffer(bus);
1084 #if defined(DHD_FW_COREDUMP)
1085 		/* save core dump or write to a file */
1086 		if (!bus->is_linkdown && bus->dhd->memdump_enabled) {
1087 #ifdef DHD_SSSR_DUMP
1088 			bus->dhd->collect_sssr = TRUE;
1089 #endif /* DHD_SSSR_DUMP */
1090 			bus->dhd->memdump_type = DUMP_TYPE_CTO_RECOVERY;
1091 			dhdpcie_mem_dump(bus);
1092 		}
1093 #endif /* DHD_FW_COREDUMP */
1094 	}
1095 #ifdef OEM_ANDROID
1096 #ifdef SUPPORT_LINKDOWN_RECOVERY
1097 #ifdef CONFIG_ARCH_MSM
1098 	bus->no_cfg_restore = 1;
1099 #endif /* CONFIG_ARCH_MSM */
1100 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1101 	bus->is_linkdown = TRUE;
1102 	bus->dhd->hang_reason = HANG_REASON_PCIE_CTO_DETECT;
1103 	/* Send HANG event */
1104 	dhd_os_send_hang_message(bus->dhd);
1105 #endif /* OEM_ANDROID */
1106 }
1107 
1108 /**
1109  * Name:  dhdpcie_bus_isr
1110  * Parameters:
1111  * 1: IN int irq   -- interrupt vector
1112  * 2: IN void *arg      -- handle to private data structure
1113  * Return value:
1114  * Status (TRUE or FALSE)
1115  *
1116  * Description:
1117  * Interrupt Service routine checks for the status register,
1118  * disable interrupt and queue DPC if mail box interrupts are raised.
1119  */
1120 int32
1121 dhdpcie_bus_isr(dhd_bus_t *bus)
1122 {
1123 	uint32 intstatus = 0;
1124 
1125 	do {
1126 		DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1127 		/* verify argument */
1128 		if (!bus) {
1129 			DHD_LOG_MEM(("%s : bus is null pointer, exit \n", __FUNCTION__));
1130 			break;
1131 		}
1132 
1133 		if (bus->dhd->dongle_reset) {
1134 			DHD_LOG_MEM(("%s : dongle is reset\n", __FUNCTION__));
1135 			break;
1136 		}
1137 
1138 		if (bus->dhd->busstate == DHD_BUS_DOWN) {
1139 			DHD_LOG_MEM(("%s : bus is down \n", __FUNCTION__));
1140 			break;
1141 		}
1142 
1143 		/* avoid processing of interrupts until msgbuf prot is inited */
1144 		if (!bus->intr_enabled) {
1145 			DHD_INFO(("%s, not ready to receive interrupts\n", __FUNCTION__));
1146 			break;
1147 		}
1148 
1149 		if (PCIECTO_ENAB(bus)) {
1150 			/* read pci_intstatus */
1151 			intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4);
1152 
1153 			if (intstatus & PCI_CTO_INT_MASK) {
1154 				DHD_ERROR(("%s: ##### CTO RECOVERY REPORTED BY DONGLE "
1155 					"intstat=0x%x enab=%d\n", __FUNCTION__,
1156 					intstatus, bus->cto_enable));
1157 				bus->cto_triggered = 1;
1158 				/*
1159 				 * DAR still accessible
1160 				 */
1161 				dhd_bus_dump_dar_registers(bus);
1162 
1163 				/* Disable further PCIe interrupts */
1164 				dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
1165 				/* Stop Tx flow */
1166 				dhd_bus_stop_queue(bus);
1167 
1168 				/* Schedule CTO recovery */
1169 				dhd_schedule_cto_recovery(bus->dhd);
1170 
1171 				return TRUE;
1172 			}
1173 		}
1174 
1175 		if (bus->d2h_intr_method == PCIE_MSI) {
1176 			/* For MSI, as intstatus is cleared by firmware, no need to read */
1177 			goto skip_intstatus_read;
1178 		}
1179 
1180 		intstatus = dhdpcie_bus_intstatus(bus);
1181 
1182 		/* Check if the interrupt is ours or not */
1183 		if (intstatus == 0) {
1184 			/* in EFI, since we poll for interrupts, this message would flood the logs,
1185 			* so it is disabled for EFI
1186 			*/
1187 			DHD_LOG_MEM(("%s : this interrupt is not ours\n", __FUNCTION__));
1188 			bus->non_ours_irq_count++;
1189 			bus->last_non_ours_irq_time = OSL_LOCALTIME_NS();
1190 			break;
1191 		}
1192 
1193 		/* save the intstatus */
1194 		/* read interrupt status register!! Status bits will be cleared in DPC !! */
1195 		bus->intstatus = intstatus;
1196 
1197 		/* return error for 0xFFFFFFFF */
1198 		if (intstatus == (uint32)-1) {
1199 			DHD_LOG_MEM(("%s : wrong interrupt status val : 0x%x\n",
1200 				__FUNCTION__, intstatus));
1201 			dhdpcie_disable_irq_nosync(bus);
1202 			break;
1203 		}
1204 
1205 skip_intstatus_read:
1206 		/*  Overall operation:
1207 		 *    - Mask further interrupts
1208 		 *    - Read/ack intstatus
1209 		 *    - Take action based on bits and state
1210 		 *    - Reenable interrupts (as per state)
1211 		 */
1212 
1213 		/* Count the interrupt call */
1214 		bus->intrcount++;
1215 
1216 		bus->ipend = TRUE;
1217 
1218 		bus->isr_intr_disable_count++;
1219 
1220 		/* For Linux, MacOS etc. (other than NDIS), instead of disabling
1221 		* the dongle interrupt by clearing the IntMask, disable the interrupt
1222 		* directly on the host side, so that the host will not receive
1223 		* any interrupts at all, even though the dongle raises interrupts
1224 		*/
1225 		dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
1226 
1227 		bus->intdis = TRUE;
1228 
1229 #if defined(PCIE_ISR_THREAD)
1230 
1231 		DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
1232 		DHD_OS_WAKE_LOCK(bus->dhd);
1233 		while (dhd_bus_dpc(bus));
1234 		DHD_OS_WAKE_UNLOCK(bus->dhd);
1235 #else
1236 		bus->dpc_sched = TRUE;
1237 		dhd_sched_dpc(bus->dhd);     /* queue DPC now!! */
1238 #endif /* defined(PCIE_ISR_THREAD) */
1239 
1240 		DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__));
1241 		return TRUE;
1242 
1243 	} while (0);
1244 
1245 	DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__));
1246 	return FALSE;
1247 }
1248 
1249 int
1250 dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state)
1251 {
1252 	uint32 cur_state = 0;
1253 	uint32 pm_csr = 0;
1254 	osl_t *osh = bus->osh;
1255 
1256 	pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1257 	cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
1258 
1259 	if (cur_state == state) {
1260 		DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__, cur_state));
1261 		return BCME_OK;
1262 	}
1263 
1264 	if (state > PCIECFGREG_PM_CSR_STATE_D3_HOT)
1265 		return BCME_ERROR;
1266 
1267 	/* Validate the state transition
1268 	* if already in a lower power state, return error
1269 	*/
1270 	if (state != PCIECFGREG_PM_CSR_STATE_D0 &&
1271 			cur_state <= PCIECFGREG_PM_CSR_STATE_D3_COLD &&
1272 			cur_state > state) {
1273 		DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__));
1274 		return BCME_ERROR;
1275 	}
1276 
1277 	pm_csr &= ~PCIECFGREG_PM_CSR_STATE_MASK;
1278 	pm_csr |= state;
1279 
1280 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32), pm_csr);
1281 
1282 	/* need to wait for the specified mandatory pcie power transition delay time */
1283 	if (state == PCIECFGREG_PM_CSR_STATE_D3_HOT ||
1284 			cur_state == PCIECFGREG_PM_CSR_STATE_D3_HOT)
1285 			OSL_DELAY(DHDPCIE_PM_D3_DELAY);
1286 	else if (state == PCIECFGREG_PM_CSR_STATE_D2 ||
1287 			cur_state == PCIECFGREG_PM_CSR_STATE_D2)
1288 			OSL_DELAY(DHDPCIE_PM_D2_DELAY);
1289 
1290 	/* read back the power state and verify */
1291 	pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1292 	cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
1293 	if (cur_state != state) {
1294 		DHD_ERROR(("%s: power transition failed ! Current state is %u \n",
1295 				__FUNCTION__, cur_state));
1296 		return BCME_ERROR;
1297 	} else {
1298 		DHD_ERROR(("%s: power transition to %u success \n",
1299 				__FUNCTION__, cur_state));
1300 	}
1301 
1302 	return BCME_OK;
1303 }
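
/*
 * Editor's note: an illustrative sketch (not part of the original source) of
 * a D0 -> D3hot -> D0 round trip using the helper above, with the PM CSR
 * state encodings already referenced in this function.
 */
#if 0
	if (dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D3_HOT) == BCME_OK) {
		/* device is in D3hot; config space remains accessible */
		dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D0);
	}
#endif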
1304 
1305 int
1306 dhdpcie_config_check(dhd_bus_t *bus)
1307 {
1308 	uint32 i, val;
1309 	int ret = BCME_ERROR;
1310 
1311 	for (i = 0; i < DHDPCIE_CONFIG_CHECK_RETRY_COUNT; i++) {
1312 		val = OSL_PCI_READ_CONFIG(bus->osh, PCI_CFG_VID, sizeof(uint32));
1313 		if ((val & 0xFFFF) == VENDOR_BROADCOM || (val & 0xFFFF) == VENDOR_CYPRESS) {
1314 			ret = BCME_OK;
1315 			break;
1316 		}
1317 		OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS * 1000);
1318 	}
1319 
1320 	return ret;
1321 }
1322 
1323 int
1324 dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr)
1325 {
1326 	uint32 i;
1327 	osl_t *osh = bus->osh;
1328 
1329 	if (BCME_OK != dhdpcie_config_check(bus)) {
1330 		return BCME_ERROR;
1331 	}
1332 
1333 	for (i = PCI_CFG_REV >> 2; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
1334 		OSL_PCI_WRITE_CONFIG(osh, i << 2, sizeof(uint32), bus->saved_config.header[i]);
1335 	}
1336 	OSL_PCI_WRITE_CONFIG(osh, PCI_CFG_CMD, sizeof(uint32), bus->saved_config.header[1]);
1337 
1338 	if (restore_pmcsr)
1339 		OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR,
1340 			sizeof(uint32), bus->saved_config.pmcsr);
1341 
1342 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_CAP, sizeof(uint32), bus->saved_config.msi_cap);
1343 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, sizeof(uint32),
1344 			bus->saved_config.msi_addr0);
1345 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
1346 			sizeof(uint32), bus->saved_config.msi_addr1);
1347 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_DATA,
1348 			sizeof(uint32), bus->saved_config.msi_data);
1349 
1350 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_DEV_STATUS_CTRL,
1351 			sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat);
1352 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGGEN_DEV_STATUS_CTRL2,
1353 			sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat2);
1354 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL,
1355 			sizeof(uint32), bus->saved_config.exp_link_ctrl_stat);
1356 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL2,
1357 			sizeof(uint32), bus->saved_config.exp_link_ctrl_stat2);
1358 
1359 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
1360 			sizeof(uint32), bus->saved_config.l1pm0);
1361 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
1362 			sizeof(uint32), bus->saved_config.l1pm1);
1363 
1364 	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, sizeof(uint32),
1365 			bus->saved_config.bar0_win);
1366 	dhdpcie_setbar1win(bus, bus->saved_config.bar1_win);
1367 
1368 	return BCME_OK;
1369 }
1370 
1371 int
1372 dhdpcie_config_save(dhd_bus_t *bus)
1373 {
1374 	uint32 i;
1375 	osl_t *osh = bus->osh;
1376 
1377 	if (BCME_OK != dhdpcie_config_check(bus)) {
1378 		return BCME_ERROR;
1379 	}
1380 
1381 	for (i = 0; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
1382 		bus->saved_config.header[i] = OSL_PCI_READ_CONFIG(osh, i << 2, sizeof(uint32));
1383 	}
1384 
1385 	bus->saved_config.pmcsr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1386 
1387 	bus->saved_config.msi_cap = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_CAP,
1388 			sizeof(uint32));
1389 	bus->saved_config.msi_addr0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_L,
1390 			sizeof(uint32));
1391 	bus->saved_config.msi_addr1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
1392 			sizeof(uint32));
1393 	bus->saved_config.msi_data = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_DATA,
1394 			sizeof(uint32));
1395 
1396 	bus->saved_config.exp_dev_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
1397 			PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32));
1398 	bus->saved_config.exp_dev_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
1399 			PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32));
1400 	bus->saved_config.exp_link_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
1401 			PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32));
1402 	bus->saved_config.exp_link_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
1403 			PCIECFGREG_LINK_STATUS_CTRL2, sizeof(uint32));
1404 
1405 	bus->saved_config.l1pm0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
1406 			sizeof(uint32));
1407 	bus->saved_config.l1pm1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
1408 			sizeof(uint32));
1409 
1410 	bus->saved_config.bar0_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN,
1411 			sizeof(uint32));
1412 	bus->saved_config.bar1_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR1_WIN,
1413 			sizeof(uint32));
1414 
1415 	return BCME_OK;
1416 }
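
/*
 * Editor's note: an illustrative sketch (not part of the original source) of
 * the save/restore pairing around an operation that can wipe PCI config
 * space, such as the dongle reset defined later in this file. restore_pmcsr
 * is TRUE only when the PM state must also be replayed.
 */
#if 0
	dhdpcie_config_save(bus);
	dhdpcie_dongle_reset(bus);	/* config space may be lost here */
	dhdpcie_config_restore(bus, FALSE);
#endif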
1417 
1418 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1419 dhd_pub_t *link_recovery = NULL;
1420 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1421 
1422 static void
1423 dhdpcie_bus_intr_init(dhd_bus_t *bus)
1424 {
1425 	uint buscorerev = bus->sih->buscorerev;
1426 	bus->pcie_mailbox_int = PCIMailBoxInt(buscorerev);
1427 	bus->pcie_mailbox_mask = PCIMailBoxMask(buscorerev);
1428 	bus->d2h_mb_mask = PCIE_MB_D2H_MB_MASK(buscorerev);
1429 	bus->def_intmask = PCIE_MB_D2H_MB_MASK(buscorerev);
1430 	if (buscorerev < 64) {
1431 		bus->def_intmask |= PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
1432 	}
1433 }
1434 
1435 static void
1436 dhdpcie_cc_watchdog_reset(dhd_bus_t *bus)
1437 {
1438 	uint32 wd_en = (bus->sih->buscorerev >= 66) ? WD_SSRESET_PCIE_F0_EN :
1439 		(WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_ALL_FN_EN);
1440 	pcie_watchdog_reset(bus->osh, bus->sih, WD_ENABLE_MASK, wd_en);
1441 }
1442 
1443 void
1444 dhdpcie_dongle_reset(dhd_bus_t *bus)
1445 {
1446 	/* if the pcie link is down, watchdog reset
1447 	 * should not be done, as it may hang
1448 	 */
1449 	if (bus->is_linkdown) {
1450 		return;
1451 	}
1452 
1453 	/* dhd_bus_perform_flr will return BCME_UNSUPPORTED if chip is not FLR capable */
1454 	if (dhd_bus_perform_flr(bus, FALSE) == BCME_UNSUPPORTED) {
1455 #ifdef DHD_USE_BP_RESET
1456 		/* Backplane reset using SPROM cfg register(0x88) for buscorerev <= 24 */
1457 		dhd_bus_perform_bp_reset(bus);
1458 #else
1459 		/* Legacy chipcommon watchdog reset */
1460 		dhdpcie_cc_watchdog_reset(bus);
1461 #endif /* DHD_USE_BP_RESET */
1462 	}
1463 }
1464 
1465 #ifdef CHIPS_CUSTOMER_HW6
1466 void
1467 dhdpcie_bus_mpu_disable(dhd_bus_t *bus)
1468 {
1469 	volatile uint32 *cr4_regs;
1470 	if (BCM4378_CHIP(bus->sih->chip)) {
1471 		cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
1472 		if (cr4_regs == NULL) {
1473 			DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
1474 			return;
1475 		}
1476 		if (R_REG(bus->osh, cr4_regs + ARMCR4REG_CORECAP) & ACC_MPU_MASK) {
1477 			/* bus mpu is supported */
1478 			W_REG(bus->osh, cr4_regs + ARMCR4REG_MPUCTRL, 0);
1479 		}
1480 	}
1481 }
1482 #endif /* CHIPS_CUSTOMER_HW6 */
1483 
1484 static bool
1485 dhdpcie_dongle_attach(dhd_bus_t *bus)
1486 {
1487 	osl_t *osh = bus->osh;
1488 	volatile void *regsva = (volatile void*)bus->regs;
1489 	uint16 devid;
1490 	uint32 val;
1491 	uint32 reg_val = 0;
1492 	bool is_pcie_reset = FALSE;
1493 	uint32 secureboot;
1494 	sbpcieregs_t *sbpcieregs;
1495 	bool dongle_isolation;
1496 	int32 bcmerror = BCME_ERROR;
1497 	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
1498 
1499 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1500 	link_recovery = bus->dhd;
1501 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1502 
1503 	bus->alp_only = TRUE;
1504 	bus->sih = NULL;
1505 
1506 	/* Checking PCIe bus status with reading configuration space */
1507 	val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
1508 	if ((val & 0xFFFF) != VENDOR_BROADCOM && (val & 0xFFFF) != VENDOR_CYPRESS) {
1509 		DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__));
1510 		goto fail;
1511 	}
1512 	devid = (val >> 16) & 0xFFFF;
1513 	bus->cl_devid = devid;
1514 
1515 	/* Set bar0 window to si_enum_base */
1516 	dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(devid));
1517 
1518 	/*
1519 	 * Checking PCI_SPROM_CONTROL register for preventing invalid address access
1520 	 * due to switch address space from PCI_BUS to SI_BUS.
1521 	 */
1522 	val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32));
1523 	if (val == 0xffffffff) {
1524 		DHD_ERROR(("%s : failed to read SPROM control register\n", __FUNCTION__));
1525 		goto fail;
1526 	}
1527 
1528 	/* Getting Secureboot capability to make sure that the
1529 	 * functionalities are restricted to chips that have a bootloader
1530 	 */
1531 	secureboot = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_REVID, sizeof(uint32));
1532 
1533 	if (isset(&secureboot, PCIECFGREG_SECURE_MODE_SHIFT)) {
1534 
1535 		/* Set bar0 window to si_pcie_enum_base */
1536 		dhdpcie_bus_cfg_set_bar0_win(bus, si_pcie_enum_base(devid));
1537 		sbpcieregs = (sbpcieregs_t*)(bus->regs);
1538 		DHD_INFO(("%s: before read reg_val:%d\n", __FUNCTION__, reg_val));
1539 		reg_val = R_REG(osh, &sbpcieregs->u1.dar_64.d2h_msg_reg0);
1540 		DHD_INFO(("%s: after reg_val:%d\n", __FUNCTION__, reg_val));
1541 		if (reg_val != D2H_HS_START_STATE && reg_val != D2H_HS_READY_STATE) {
1542 			/* si_attach() will provide an SI handle and scan the backplane */
1543 			if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
1544 			     &bus->vars, &bus->varsz))) {
1545 				DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
1546 				goto fail;
1547 			}
1548 			dhdpcie_dongle_reset(bus);
1549 			is_pcie_reset = TRUE;
1550 		}
1551 
1552 		/* Pre ChipID access sequence, make sure that
1553 		 * bootloader is ready before ChipID access.
1554 		 */
1555 		bcmerror = dhdpcie_dongle_host_pre_chipid_access_sequence(osh, regsva);
1556 		if (bcmerror) {
1557 			DHD_ERROR(("%s: error - pre chipid access sequence error %d\n",
1558 				__FUNCTION__, bcmerror));
1559 			goto fail;
1560 		}
1561 
1562 		/* Set bar0 window to si_enum_base */
1563 		dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(devid));
1564 	}
1565 
1566 	/* si_attach() will provide an SI handle and scan the backplane */
1567 	if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
1568 	                           &bus->vars, &bus->varsz))) {
1569 		DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
1570 		goto fail;
1571 	}
1572 
1573 	/* Configure CTO Prevention functionality */
1574 #if defined(BCMFPGA_HW)
1575 	DHD_ERROR(("Disable CTO\n"));
1576 	bus->cto_enable = FALSE;
1577 #else
1578 #if defined(BCMPCIE_CTO_PREVENTION)
1579 	if (bus->sih->buscorerev >= 24) {
1580 		DHD_ERROR(("Enable CTO\n"));
1581 		bus->cto_enable = TRUE;
1582 	} else
1583 #endif /* BCMPCIE_CTO_PREVENTION */
1584 	{
1585 		DHD_ERROR(("Disable CTO\n"));
1586 		bus->cto_enable = FALSE;
1587 	}
1588 #endif /* BCMFPGA_HW */
1589 
1590 	if (PCIECTO_ENAB(bus)) {
1591 		dhdpcie_cto_init(bus, TRUE);
1592 	}
1593 
1594 	/* Storing secureboot capability */
1595 	bus->sih->secureboot = isset(&secureboot, PCIECFGREG_SECURE_MODE_SHIFT);
1596 
1597 	if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) {
1598 		/*
1599 		 * HW JIRA - CRWLPCIEGEN2-672
1600 		 * Producer Index Feature which is used by F1 gets reset on F0 FLR
1601 		 * fixed in REV68
1602 		 */
1603 		if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
1604 			dhdpcie_ssreset_dis_enum_rst(bus);
1605 		}
1606 
1607 		/* IOV_DEVRESET could exercise si_detach()/si_attach() again, so reset the
1608 		 * power-request refcount: dhdpcie_bus_release_dongle() --> si_detach(),
1609 		 * dhdpcie_dongle_attach() --> si_attach()
1610 		 */
1611 		bus->pwr_req_ref = 0;
1612 	}
1613 
1614 	if (MULTIBP_ENAB(bus->sih)) {
1615 		dhd_bus_pcie_pwr_req_nolock(bus);
1616 	}
1617 
1618 	/* Get info on the ARM and SOCRAM cores... */
1619 	/* Should really be qualified by device id */
1620 	if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
1621 	    (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
1622 	    (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
1623 	    (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
1624 		bus->armrev = si_corerev(bus->sih);
1625 		bus->coreid = si_coreid(bus->sih);
1626 	} else {
1627 		DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
1628 		goto fail;
1629 	}
1630 
1631 	/* CA7 requires coherent bits on */
1632 	if (bus->coreid == ARMCA7_CORE_ID) {
1633 		val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
1634 		dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
1635 			(val | PCIE_BARCOHERENTACCEN_MASK));
1636 	}
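	/*
	 * Illustrative sketch (not part of the driver): the CA7 coherent-enable
	 * above is a read-modify-write of a config register. A generic set-bits
	 * helper built from the accessors defined later in this file would be:
	 *
	 *   static void
	 *   dhdpcie_cfg_set_bits(dhd_bus_t *bus, uint32 addr, uint32 mask)
	 *   {
	 *       uint32 v = dhdpcie_bus_cfg_read_dword(bus, addr, 4);
	 *       dhdpcie_bus_cfg_write_dword(bus, addr, 4, v | mask);
	 *   }
	 *
	 * dhdpcie_cfg_set_bits() is hypothetical; the driver open-codes this
	 * read/OR/write sequence instead.
	 */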
1637 
1638 	/* Olympic EFI requirement - stop driver load if FW is already running
1639 	*  need to do this here before pcie_watchdog_reset, because
1640 	*  pcie_watchdog_reset will put the ARM back into halt state
1641 	*/
1642 	if (!dhdpcie_is_arm_halted(bus)) {
1643 		DHD_ERROR(("%s: ARM is not halted, FW is already running! Abort.\n",
1644 				__FUNCTION__));
1645 		goto fail;
1646 	}
1647 
1648 	BCM_REFERENCE(dongle_isolation);
1649 
1650 	/* For built-in drivers the PCIe clk req will be done by the RC,
1651 	 * so do not issue clkreq from DHD
1652 	 */
1653 	if (dhd_download_fw_on_driverload)
1654 	{
1655 		/* Enable CLKREQ# */
1656 		dhdpcie_clkreq(bus->osh, 1, 1);
1657 	}
1658 
1659 	/*
1660 	 * bus->dhd will be NULL if it is called from dhd_bus_attach, so need to reset
1661 	 * without checking dongle_isolation flag, but if it is called via some other path
1662 	 * like quiesce FLR, then based on dongle_isolation flag, watchdog_reset should
1663 	 * be called.
1664 	 */
1665 	if (bus->dhd == NULL) {
1666 		/* dhd_attach not yet happened, do watchdog reset */
1667 		dongle_isolation = FALSE;
1668 	} else {
1669 		dongle_isolation = bus->dhd->dongle_isolation;
1670 	}
1671 
1672 #ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH
1673 	/*
1674 	 * Issue CC watchdog to reset all the cores on the chip - similar to rmmod dhd
1675 	 * This is required to avoid spurious interrupts to the Host and bring back
1676 	 * dongle to a sane state (on host soft-reboot / watchdog-reboot).
1677 	 */
1678 	if (dongle_isolation == FALSE && is_pcie_reset == FALSE) {
1679 		dhdpcie_dongle_reset(bus);
1680 	}
1681 #endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */
1682 
1683 	/* need to set the force_bt_quiesce flag here
1684 	 * before calling dhdpcie_dongle_flr_or_pwr_toggle
1685 	 */
1686 	bus->force_bt_quiesce = TRUE;
1687 	/*
1688 	 * For buscorerev = 66 and after, F0 FLR should be done independent from F1.
1689 	 * So don't need BT quiesce.
1690 	 */
1691 	if (bus->sih->buscorerev >= 66) {
1692 		bus->force_bt_quiesce = FALSE;
1693 	}
1694 
1695 	dhdpcie_dongle_flr_or_pwr_toggle(bus);
1696 
1697 #ifdef CHIPS_CUSTOMER_HW6
1698 	dhdpcie_bus_mpu_disable(bus);
1699 #endif /* CHIPS_CUSTOMER_HW6 */
1700 
1701 	si_setcore(bus->sih, PCIE2_CORE_ID, 0);
1702 	sbpcieregs = (sbpcieregs_t*)(bus->regs);
1703 
1704 	/* WAR where the BAR1 window may not be sized properly */
1705 	W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
1706 	val = R_REG(osh, &sbpcieregs->configdata);
1707 	W_REG(osh, &sbpcieregs->configdata, val);
1708 
1709 	if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
1710 		/* Only set dongle RAMSIZE to default value when BMC vs ARM usage of SYSMEM is not
1711 		 * adjusted.
1712 		 */
1713 		if (!bus->ramsize_adjusted) {
1714 			if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) {
1715 				DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__));
1716 				goto fail;
1717 			}
1718 			switch ((uint16)bus->sih->chip) {
1719 #ifdef CHIPS_CUSTOMER_HW6
1720 				case BCM4368_CHIP_ID:
1721 					bus->dongle_ram_base = CA7_4368_RAM_BASE;
1722 					bus->orig_ramsize = 0x1c0000;
1723 					break;
1724 				CASE_BCM4367_CHIP:
1725 					bus->dongle_ram_base = CA7_4367_RAM_BASE;
1726 					bus->orig_ramsize = 0x1e0000;
1727 					break;
1728 #endif /* CHIPS_CUSTOMER_HW6 */
1729 				default:
1730 					/* also populate base address */
1731 					bus->dongle_ram_base = CA7_4365_RAM_BASE;
1732 					bus->orig_ramsize = 0x1c0000; /* Reserve 1.75MB for CA7 */
1733 					break;
1734 			}
1735 		}
1736 	} else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
1737 		if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
1738 			DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
1739 			goto fail;
1740 		}
1741 	} else {
1742 		/* CR4 has a different way to find the RAM size, from the TCMs */
1743 		if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
1744 			DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
1745 			goto fail;
1746 		}
1747 		/* also populate base address */
1748 		switch ((uint16)bus->sih->chip) {
1749 		case BCM4339_CHIP_ID:
1750 		case BCM4335_CHIP_ID:
1751 			bus->dongle_ram_base = CR4_4335_RAM_BASE;
1752 			break;
1753 		case BCM4358_CHIP_ID:
1754 		case BCM4354_CHIP_ID:
1755 		case BCM43567_CHIP_ID:
1756 		case BCM43569_CHIP_ID:
1757 		case BCM4350_CHIP_ID:
1758 		case BCM43570_CHIP_ID:
1759 			bus->dongle_ram_base = CR4_4350_RAM_BASE;
1760 			break;
1761 		case BCM4360_CHIP_ID:
1762 			bus->dongle_ram_base = CR4_4360_RAM_BASE;
1763 			break;
1764 
1765 		case BCM4364_CHIP_ID:
1766 			bus->dongle_ram_base = CR4_4364_RAM_BASE;
1767 			break;
1768 
1769 		CASE_BCM4345_CHIP:
1770 			bus->dongle_ram_base = (bus->sih->chiprev < 6)  /* changed at 4345C0 */
1771 				? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE;
1772 			break;
1773 		CASE_BCM43602_CHIP:
1774 			bus->dongle_ram_base = CR4_43602_RAM_BASE;
1775 			break;
1776 		case BCM4349_CHIP_GRPID:
1777 			/* RAM base changed from 4349c0 (revid=9) onwards */
1778 			bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
1779 				CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9);
1780 			break;
1781 		case BCM4347_CHIP_ID:
1782 		case BCM4357_CHIP_ID:
1783 		case BCM4361_CHIP_ID:
1784 			bus->dongle_ram_base = CR4_4347_RAM_BASE;
1785 			break;
1786 		case BCM4362_CHIP_ID:
1787 			bus->dongle_ram_base = CR4_4362_RAM_BASE;
1788 			break;
1789 		case BCM43751_CHIP_ID:
1790 			bus->dongle_ram_base = CR4_43751_RAM_BASE;
1791 			break;
1792 
1793 		case BCM4373_CHIP_ID:
1794 			bus->dongle_ram_base = CR4_4373_RAM_BASE;
1795 			break;
1796 #ifdef CHIPS_CUSTOMER_HW6
1797 		case BCM4378_CHIP_GRPID:
1798 			bus->dongle_ram_base = CR4_4378_RAM_BASE;
1799 			break;
1800 		case BCM4377_CHIP_ID:
1801 			bus->dongle_ram_base = CR4_4377_RAM_BASE;
1802 			break;
1803 #endif /* CHIPS_CUSTOMER_HW6 */
1804 		case BCM4375_CHIP_ID:
1805 		case BCM4369_CHIP_ID:
1806 			bus->dongle_ram_base = CR4_4369_RAM_BASE;
1807 			break;
1808 		case CYW55560_CHIP_ID:
1809 			bus->dongle_ram_base = CR4_55560_RAM_BASE;
1810 			break;
1811 		default:
1812 			bus->dongle_ram_base = 0;
1813 			DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
1814 			           __FUNCTION__, bus->dongle_ram_base));
1815 		}
1816 	}
1817 
1818 	/* 55560: dedicated space for TCAM patching and the TRX header at RAMBASE */
1819 	/* TCAM patching - 2048 bytes [2K], TRX header - 32 bytes */
1820 	if (bus->sih->chip == CYW55560_CHIP_ID) {
1821 		bus->orig_ramsize -= (CR4_55560_TCAM_SZ + CR4_55560_TRX_HDR_SZ);
1822 	}
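	/*
	 * Worked example (assuming the sizes in the comment above): with
	 * CR4_55560_TCAM_SZ = 2048 and CR4_55560_TRX_HDR_SZ = 32, a 55560
	 * loses 2048 + 32 = 2080 bytes of orig_ramsize to the TCAM patch
	 * area and TRX header before the DONGLE_TCM_MAP_SIZE check below.
	 */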
1823 
1824 	bus->ramsize = bus->orig_ramsize;
1825 	if (dhd_dongle_memsize)
1826 		dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize);
1827 
1828 	if (bus->ramsize > DONGLE_TCM_MAP_SIZE) {
1829 		DHD_ERROR(("%s : invalid ramsize %d(0x%x) is returned from dongle\n",
1830 				__FUNCTION__, bus->ramsize, bus->ramsize));
1831 		goto fail;
1832 	}
1833 
1834 	DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
1835 	           bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
1836 
1837 	bus->srmemsize = si_socram_srmem_size(bus->sih);
1838 
1839 	dhdpcie_bus_intr_init(bus);
1840 
1841 	/* Set the poll and/or interrupt flags */
1842 	bus->intr = (bool)dhd_intr;
1843 
1844 	bus->idma_enabled = TRUE;
1845 	bus->ifrm_enabled = TRUE;
1846 	DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__));
1847 
1848 	if (MULTIBP_ENAB(bus->sih)) {
1849 		dhd_bus_pcie_pwr_req_clear_nolock(bus);
1850 
1851 		/*
1852 		 * One time clearing of Common Power Domain since HW default is set
1853 		 * Needs to be after FLR because FLR resets PCIe enum back to HW defaults
1854 		 * for 4378B0 (rev 68).
1855 		 * On 4378A0 (rev 66), PCIe enum reset is disabled due to CRWLPCIEGEN2-672
1856 		 */
1857 		si_srpwr_request(bus->sih, SRPWR_DMN0_PCIE_MASK, 0);
1858 
1859 		/*
1860 		 * WAR to fix ARM cold boot;
1861 		 * Assert WL domain in DAR helps but not enum
1862 		 */
1863 		if (bus->sih->buscorerev >= 68) {
1864 			dhd_bus_pcie_pwr_req_wl_domain(bus, TRUE);
1865 		}
1866 	}
1867 
1868 	return 0;
1869 
1870 fail:
1871 	if (bus->sih != NULL) {
1872 		if (MULTIBP_ENAB(bus->sih)) {
1873 			dhd_bus_pcie_pwr_req_clear_nolock(bus);
1874 		}
1875 		/* for EFI even if there is an error, load still succeeds
1876 		* so si_detach should not be called here, it is called during unload
1877 		*/
1878 		si_detach(bus->sih);
1879 		bus->sih = NULL;
1880 	}
1881 	DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__));
1882 	return -1;
1883 }
1884 
1885 int
1886 dhpcie_bus_unmask_interrupt(dhd_bus_t *bus)
1887 {
1888 	dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB);
1889 	return 0;
1890 }
1891 int
1892 dhpcie_bus_mask_interrupt(dhd_bus_t *bus)
1893 {
1894 	dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0);
1895 	return 0;
1896 }
1897 
1898 /* Non atomic function, caller should hold appropriate lock */
1899 void
1900 dhdpcie_bus_intr_enable(dhd_bus_t *bus)
1901 {
1902 	DHD_TRACE(("%s Enter\n", __FUNCTION__));
1903 	if (bus) {
1904 		if (bus->sih && !bus->is_linkdown) {
1905 			/* Skip after receiving D3 ACK */
1906 			if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
1907 				return;
1908 			}
1909 			if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
1910 				(bus->sih->buscorerev == 4)) {
1911 				dhpcie_bus_unmask_interrupt(bus);
1912 			} else {
1913 	#if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
1914 				dhd_bus_mmio_trace(bus, bus->pcie_mailbox_mask,
1915 					bus->def_intmask, TRUE);
1916 	#endif
1917 				si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
1918 					bus->def_intmask, bus->def_intmask);
1919 			}
1920 		}
1921 
1922 	}
1923 
1924 	DHD_TRACE(("%s Exit\n", __FUNCTION__));
1925 }
1926 
1927 /* Non atomic function, caller should hold appropriate lock */
1928 void
1929 dhdpcie_bus_intr_disable(dhd_bus_t *bus)
1930 {
1931 	DHD_TRACE(("%s Enter\n", __FUNCTION__));
1932 	if (bus && bus->sih && !bus->is_linkdown) {
1933 		/* Skip after receiving D3 ACK */
1934 		if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
1935 			return;
1936 		}
1937 		if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
1938 			(bus->sih->buscorerev == 4)) {
1939 			dhpcie_bus_mask_interrupt(bus);
1940 		} else {
1941 			si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
1942 				bus->def_intmask, 0);
1943 		}
1944 	}
1945 
1946 	DHD_TRACE(("%s Exit\n", __FUNCTION__));
1947 }
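/*
 * Usage sketch: both interrupt helpers above are non-atomic, so callers
 * bracket them with the bus spinlock, e.g. as dhdpcie_bus_release() does:
 *
 *   DHD_BUS_LOCK(bus->bus_lock, flags_bus);
 *   dhdpcie_bus_intr_disable(bus);
 *   DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
 */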
1948 
1949 /*
1950  * dhdpcie_advertise_bus_cleanup advertises that cleanup is in progress
1951  * to other bus-user contexts (Tx, Rx, IOVAR, WD, etc.) and waits for those contexts
1952  * to exit gracefully. Before marking busstate as busy, every bus-usage context checks
1953  * whether the busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS; if so,
1954  * it exits right there without marking dhd_bus_busy_state as BUSY.
1955  */
1956 void
1957 dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp)
1958 {
1959 	unsigned long flags;
1960 	int timeleft;
1961 
1962 #ifdef DHD_PCIE_RUNTIMEPM
1963 	dhdpcie_runtime_bus_wake(dhdp, TRUE, dhdpcie_advertise_bus_cleanup);
1964 #endif /* DHD_PCIE_RUNTIMEPM */
1965 
1966 	dhdp->dhd_watchdog_ms_backup = dhd_watchdog_ms;
1967 	if (dhdp->dhd_watchdog_ms_backup) {
1968 		DHD_ERROR(("%s: Disabling wdtick before dhd deinit\n",
1969 			__FUNCTION__));
1970 		dhd_os_wd_timer(dhdp, 0);
1971 	}
1972 	if (dhdp->busstate != DHD_BUS_DOWN) {
1973 		DHD_GENERAL_LOCK(dhdp, flags);
1974 		dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
1975 		DHD_GENERAL_UNLOCK(dhdp, flags);
1976 	}
1977 
1978 	timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
1979 	if ((timeleft == 0) || (timeleft == 1)) {
1980 		DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
1981 				__FUNCTION__, dhdp->dhd_bus_busy_state));
1982 		ASSERT(0);
1983 	}
1984 
1985 	return;
1986 }
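/*
 * Sketch of the guard described above as a bus-user context applies it
 * (illustrative only; compare the real check in dhd_bus_watchdog() below):
 *
 *   DHD_GENERAL_LOCK(dhdp, flags);
 *   if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
 *       DHD_GENERAL_UNLOCK(dhdp, flags);
 *       return;                        (bail out without marking BUSY)
 *   }
 *   DHD_BUS_BUSY_SET_IN_WD(dhdp);      (or the context's own BUSY flag)
 *   DHD_GENERAL_UNLOCK(dhdp, flags);
 */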
1987 
1988 static void
1989 dhdpcie_bus_remove_prep(dhd_bus_t *bus)
1990 {
1991 	unsigned long flags;
1992 	DHD_TRACE(("%s Enter\n", __FUNCTION__));
1993 
1994 	DHD_GENERAL_LOCK(bus->dhd, flags);
1995 	DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
1996 	bus->dhd->busstate = DHD_BUS_DOWN;
1997 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
1998 
1999 	dhd_os_sdlock(bus->dhd);
2000 
2001 	if (bus->sih && !bus->dhd->dongle_isolation) {
2002 		if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev) &&
2003 		    bus->sih->chip != CYW55560_CHIP_ID) {
2004 			dhd_bus_pcie_pwr_req_reload_war(bus);
2005 		}
2006 
2007 		/* WAR for the insmod-fails-after-rmmod issue on Brix Android */
2008 #if !defined(OEM_ANDROID) && !defined(ANDROID)
2009 		/* HW4347-909 */
2010 		if ((bus->sih->buscorerev == 19) || (bus->sih->buscorerev == 23)) {
2011 			/* Set PCIE TRefUp time to 100us for 4347 */
2012 			pcie_set_trefup_time_100us(bus->sih);
2013 		}
2014 
2015 		/* Disable fast LPO from 4347 onwards. */
2016 		/* For 4378/4387, do not disable fast LPO because it is always enabled there;
2017 		 * disabling it causes insmod/rmmod reload failure.
2018 		 */
2019 		if ((PMUREV(bus->sih->pmurev) > 31) &&
2020 		    (bus->sih->buscorerev != 66) &&
2021 		    (bus->sih->buscorerev != 68) &&
2022 		    (bus->sih->buscorerev != 69) &&
2023 		    (bus->sih->buscorerev != 70)) {
2024 			si_pmu_fast_lpo_disable(bus->sih);
2025 		}
2026 #endif /* !OEM_ANDROID && !ANDROID */
2027 
2028 		/* if the pcie link is down, watchdog reset
2029 		* should not be done, as it may hang
2030 		*/
2031 
2032 		if (!bus->is_linkdown) {
2033 #ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH
2034 			/* For EFI, depending on the BT-over-PCIe mode,
2035 			 * either a power toggle or an F0 FLR is done from
2036 			 * dhdpcie_bus_release_dongle, so EFI builds need
2037 			 * no dongle reset from here.
2038 			 */
2039 			dhdpcie_dongle_reset(bus);
2040 #endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */
2041 		}
2042 
2043 		bus->dhd->is_pcie_watchdog_reset = TRUE;
2044 	}
2045 
2046 	dhd_os_sdunlock(bus->dhd);
2047 
2048 	DHD_TRACE(("%s Exit\n", __FUNCTION__));
2049 }
2050 
2051 void
2052 dhd_init_bus_lock(dhd_bus_t *bus)
2053 {
2054 	if (!bus->bus_lock) {
2055 		bus->bus_lock = dhd_os_spin_lock_init(bus->dhd->osh);
2056 	}
2057 }
2058 
2059 void
2060 dhd_deinit_bus_lock(dhd_bus_t *bus)
2061 {
2062 	if (bus->bus_lock) {
2063 		dhd_os_spin_lock_deinit(bus->dhd->osh, bus->bus_lock);
2064 		bus->bus_lock = NULL;
2065 	}
2066 }
2067 
2068 void
2069 dhd_init_backplane_access_lock(dhd_bus_t *bus)
2070 {
2071 	if (!bus->backplane_access_lock) {
2072 		bus->backplane_access_lock = dhd_os_spin_lock_init(bus->dhd->osh);
2073 	}
2074 }
2075 
2076 void
2077 dhd_deinit_backplane_access_lock(dhd_bus_t *bus)
2078 {
2079 	if (bus->backplane_access_lock) {
2080 		dhd_os_spin_lock_deinit(bus->dhd->osh, bus->backplane_access_lock);
2081 		bus->backplane_access_lock = NULL;
2082 	}
2083 }
2084 
2085 /** Detach and free everything */
2086 void
2087 dhdpcie_bus_release(dhd_bus_t *bus)
2088 {
2089 	bool dongle_isolation = FALSE;
2090 #ifdef BCMQT
2091 	uint buscorerev = 0;
2092 #endif /* BCMQT */
2093 	osl_t *osh = NULL;
2094 	unsigned long flags_bus;
2095 
2096 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2097 
2098 	if (bus) {
2099 
2100 		osh = bus->osh;
2101 		ASSERT(osh);
2102 
2103 		if (bus->dhd) {
2104 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
2105 			debugger_close();
2106 #endif /* DEBUGGER || DHD_DSCOPE */
2107 			dhdpcie_advertise_bus_cleanup(bus->dhd);
2108 			dongle_isolation = bus->dhd->dongle_isolation;
2109 			bus->dhd->is_pcie_watchdog_reset = FALSE;
2110 			dhdpcie_bus_remove_prep(bus);
2111 
2112 			if (bus->intr) {
2113 				DHD_BUS_LOCK(bus->bus_lock, flags_bus);
2114 				dhdpcie_bus_intr_disable(bus);
2115 				DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
2116 				dhdpcie_free_irq(bus);
2117 			}
2118 			dhd_deinit_bus_lock(bus);
2119 			dhd_deinit_backplane_access_lock(bus);
2120 #ifdef BCMQT
2121 			if (IDMA_ACTIVE(bus->dhd)) {
2122 			/**
2123 			 * On FPGA, during the exit path, force-set the "IDMA Control Register"
2124 			 * to its default value 0x0. Otherwise host-dongle sync for IDMA fails
2125 			 * during the next IDMA initialization (without a system reboot).
2126 			 */
2127 				buscorerev = bus->sih->buscorerev;
2128 				si_corereg(bus->sih, bus->sih->buscoreidx,
2129 					IDMAControl(buscorerev), ~0, 0);
2130 			}
2131 #endif /* BCMQT */
2132 			/**
2133 			 * dhdpcie_bus_release_dongle frees the bus->sih handle, which is needed to
2134 			 * access dongle registers.
2135 			 * dhd_detach will communicate with the dongle to delete flowrings etc.,
2136 			 * so dhdpcie_bus_release_dongle should be called only after dhd_detach.
2137 			 */
2138 			dhd_detach(bus->dhd);
2139 			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
2140 			dhd_free(bus->dhd);
2141 			bus->dhd = NULL;
2142 		}
2143 		/* unmap the regs and tcm here!! */
2144 		if (bus->regs) {
2145 			dhdpcie_bus_reg_unmap(osh, bus->regs, DONGLE_REG_MAP_SIZE);
2146 			bus->regs = NULL;
2147 		}
2148 		if (bus->tcm) {
2149 			dhdpcie_bus_reg_unmap(osh, bus->tcm, DONGLE_TCM_MAP_SIZE);
2150 			bus->tcm = NULL;
2151 		}
2152 
2153 		dhdpcie_bus_release_malloc(bus, osh);
2154 		/* Detach pcie shared structure */
2155 		if (bus->pcie_sh) {
2156 			MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
2157 			bus->pcie_sh = NULL;
2158 		}
2159 
2160 		if (bus->console.buf != NULL) {
2161 			MFREE(osh, bus->console.buf, bus->console.bufsize);
2162 		}
2163 
2164 		/* Finally free bus info */
2165 		MFREE(osh, bus, sizeof(dhd_bus_t));
2166 
2167 		g_dhd_bus = NULL;
2168 	}
2169 
2170 	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
2171 } /* dhdpcie_bus_release */
2172 
2173 void
2174 dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
2175 {
2176 	DHD_TRACE(("%s: Enter bus->dhd %p dongle_reset %d\n", __FUNCTION__,
2177 		bus->dhd, bus->dhd ? bus->dhd->dongle_reset : 0));
2178 
2179 	if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) {
2180 		DHD_TRACE(("%s Exit\n", __FUNCTION__));
2181 		return;
2182 	}
2183 
2184 	if (bus->is_linkdown) {
2185 		DHD_ERROR(("%s : Skip release dongle due to linkdown \n", __FUNCTION__));
2186 		return;
2187 	}
2188 
2189 	if (bus->sih) {
2190 
2191 		if (!dongle_isolation &&
2192 			(bus->dhd && !bus->dhd->is_pcie_watchdog_reset)) {
2193 			dhdpcie_dongle_reset(bus);
2194 		}
2195 
2196 		dhdpcie_dongle_flr_or_pwr_toggle(bus);
2197 
2198 		if (bus->ltrsleep_on_unload) {
2199 			si_corereg(bus->sih, bus->sih->buscoreidx,
2200 				OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
2201 		}
2202 
2203 		if (bus->sih->buscorerev == 13)
2204 			 pcie_serdes_iddqdisable(bus->osh, bus->sih,
2205 			                         (sbpcieregs_t *) bus->regs);
2206 
2207 		/* For built-in drivers the PCIe clk req will be done by the RC,
2208 		 * so do not issue clkreq from DHD
2209 		 */
2210 		if (dhd_download_fw_on_driverload)
2211 		{
2212 			/* Disable CLKREQ# */
2213 			dhdpcie_clkreq(bus->osh, 1, 0);
2214 		}
2215 
2216 #ifdef PCIE_SUSPEND_DURING_DETACH
2217 		dhdpcie_bus_clock_stop(bus);
2218 #endif /* PCIE_SUSPEND_DURING_DETACH */
2219 
2220 		if (bus->sih != NULL) {
2221 			si_detach(bus->sih);
2222 			bus->sih = NULL;
2223 		}
2224 		if (bus->vars && bus->varsz)
2225 			MFREE(osh, bus->vars, bus->varsz);
2226 		bus->vars = NULL;
2227 	}
2228 
2229 	DHD_TRACE(("%s Exit\n", __FUNCTION__));
2230 }
2231 
2232 uint32
2233 dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size)
2234 {
2235 	uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size);
2236 	return data;
2237 }
2238 
2239 /** 32 bit config write */
2240 void
2241 dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data)
2242 {
2243 	OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data);
2244 }
2245 
2246 void
2247 dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data)
2248 {
2249 	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data);
2250 }
2251 
2252 void
2253 dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
2254 {
2255 	int32 min_size =  DONGLE_MIN_MEMSIZE;
2256 	/* Restrict the memsize to user specified limit */
2257 	DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
2258 		dhd_dongle_memsize, min_size));
2259 	if ((dhd_dongle_memsize > min_size) &&
2260 		(dhd_dongle_memsize < (int32)bus->orig_ramsize))
2261 		bus->ramsize = dhd_dongle_memsize;
2262 }
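/*
 * Example (hypothetical value): with the user-set dhd_dongle_memsize at
 * 0x100000 and an orig_ramsize of 0x1c0000, bus->ramsize is clamped to
 * 0x100000; values at or below DONGLE_MIN_MEMSIZE, or at or above
 * orig_ramsize, leave bus->ramsize unchanged.
 */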
2263 
2264 void
2265 dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh)
2266 {
2267 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2268 
2269 	if (bus->dhd && bus->dhd->dongle_reset)
2270 		return;
2271 
2272 	if (bus->vars && bus->varsz) {
2273 		MFREE(osh, bus->vars, bus->varsz);
2274 		bus->vars = NULL;
2275 	}
2276 
2277 	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
2278 	return;
2279 
2280 }
2281 
2282 /** Stop bus module: clear pending frames, disable data flow */
2283 void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
2284 {
2285 	unsigned long flags, flags_bus;
2286 
2287 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2288 
2289 	if (!bus->dhd)
2290 		return;
2291 
2292 	if (bus->dhd->busstate == DHD_BUS_DOWN) {
2293 		DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__));
2294 		goto done;
2295 	}
2296 
2297 	DHD_DISABLE_RUNTIME_PM(bus->dhd);
2298 
2299 	DHD_GENERAL_LOCK(bus->dhd, flags);
2300 	DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
2301 	bus->dhd->busstate = DHD_BUS_DOWN;
2302 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
2303 
2304 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2305 	atomic_set(&bus->dhd->block_bus, TRUE);
2306 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2307 
2308 	DHD_BUS_LOCK(bus->bus_lock, flags_bus);
2309 	dhdpcie_bus_intr_disable(bus);
2310 	DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
2311 
2312 	if (!bus->is_linkdown) {
2313 		uint32 status;
2314 		status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
2315 		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
2316 	}
2317 
2318 	if (!dhd_download_fw_on_driverload) {
2319 		dhd_dpc_kill(bus->dhd);
2320 	}
2321 
2322 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2323 	pm_runtime_disable(dhd_bus_to_dev(bus));
2324 	pm_runtime_set_suspended(dhd_bus_to_dev(bus));
2325 	pm_runtime_enable(dhd_bus_to_dev(bus));
2326 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2327 
2328 	/* Clear rx control and wake any waiters */
2329 	dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT);
2330 	dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP);
2331 
2332 done:
2333 	return;
2334 }
2335 
2336 /**
2337  * Watchdog timer function.
2338  * @param dhd   Represents a specific hardware (dongle) instance that this DHD manages
2339  */
2340 bool dhd_bus_watchdog(dhd_pub_t *dhd)
2341 {
2342 	unsigned long flags;
2343 	dhd_bus_t *bus = dhd->bus;
2344 
2345 	DHD_GENERAL_LOCK(dhd, flags);
2346 	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) ||
2347 			DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd)) {
2348 		DHD_GENERAL_UNLOCK(dhd, flags);
2349 		return FALSE;
2350 	}
2351 	DHD_BUS_BUSY_SET_IN_WD(dhd);
2352 	DHD_GENERAL_UNLOCK(dhd, flags);
2353 
2354 #ifdef DHD_PCIE_RUNTIMEPM
2355 	dhdpcie_runtime_bus_wake(dhd, TRUE, __builtin_return_address(0));
2356 #endif /* DHD_PCIE_RUNTIMEPM */
2357 
2358 	/* Poll for console output periodically */
2359 	if (dhd->busstate == DHD_BUS_DATA &&
2360 		dhd->dhd_console_ms != 0 &&
2361 		bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE) {
2362 		bus->console.count += dhd_watchdog_ms;
2363 		if (bus->console.count >= dhd->dhd_console_ms) {
2364 			bus->console.count -= dhd->dhd_console_ms;
2365 
2366 			if (MULTIBP_ENAB(bus->sih)) {
2367 				dhd_bus_pcie_pwr_req(bus);
2368 			}
2369 
2370 			/* Make sure backplane clock is on */
2371 			if (dhdpcie_bus_readconsole(bus) < 0) {
2372 				dhd->dhd_console_ms = 0; /* On error, stop trying */
2373 			}
2374 
2375 			if (MULTIBP_ENAB(bus->sih)) {
2376 				dhd_bus_pcie_pwr_req_clear(bus);
2377 			}
2378 		}
2379 	}
2380 
2381 	DHD_GENERAL_LOCK(dhd, flags);
2382 	DHD_BUS_BUSY_CLEAR_IN_WD(dhd);
2383 	dhd_os_busbusy_wake(dhd);
2384 	DHD_GENERAL_UNLOCK(dhd, flags);
2385 
2386 	return TRUE;
2387 } /* dhd_bus_watchdog */
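/*
 * Worked example of the console poll accumulator above (illustrative
 * values): with dhd_watchdog_ms = 10 and dhd_console_ms = 250,
 * console.count grows by 10 per watchdog tick, dhdpcie_bus_readconsole()
 * runs on every 25th tick, and the 250 subtracted afterwards carries any
 * remainder into the next interval.
 */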
2388 
2389 #if defined(SUPPORT_MULTIPLE_REVISION)
2390 static int concate_revision_bcm4358(dhd_bus_t *bus, char *fw_path, char *nv_path)
2391 {
2392 	uint32 chiprev;
2393 #if defined(SUPPORT_MULTIPLE_CHIPS)
2394 	char chipver_tag[20] = "_4358";
2395 #else
2396 	char chipver_tag[10] = {0, };
2397 #endif /* SUPPORT_MULTIPLE_CHIPS */
2398 
2399 	chiprev = dhd_bus_chiprev(bus);
2400 	if (chiprev == 0) {
2401 		DHD_ERROR(("----- CHIP 4358 A0 -----\n"));
2402 		strcat(chipver_tag, "_a0");
2403 	} else if (chiprev == 1) {
2404 		DHD_ERROR(("----- CHIP 4358 A1 -----\n"));
2405 #if defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS)
2406 		strcat(chipver_tag, "_a1");
2407 #endif /* defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS) */
2408 	} else if (chiprev == 3) {
2409 		DHD_ERROR(("----- CHIP 4358 A3 -----\n"));
2410 #if defined(SUPPORT_MULTIPLE_CHIPS)
2411 		strcat(chipver_tag, "_a3");
2412 #endif /* SUPPORT_MULTIPLE_CHIPS */
2413 	} else {
2414 		DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chiprev));
2415 	}
2416 
2417 	strcat(fw_path, chipver_tag);
2418 
2419 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK)
2420 	if (chiprev == 1 || chiprev == 3) {
2421 		int ret = dhd_check_module_b85a();
2422 		if ((chiprev == 1) && (ret < 0)) {
2423 			memset(chipver_tag, 0x00, sizeof(chipver_tag));
2424 			strcat(chipver_tag, "_b85");
2425 			strcat(chipver_tag, "_a1");
2426 		}
2427 	}
2428 
2429 	DHD_ERROR(("%s: chipver_tag %s \n", __FUNCTION__, chipver_tag));
2430 #endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */
2431 
2432 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
2433 	if (system_rev >= 10) {
2434 		DHD_ERROR(("----- Board Rev  [%d]-----\n", system_rev));
2435 		strcat(chipver_tag, "_r10");
2436 	}
2437 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
2438 	strcat(nv_path, chipver_tag);
2439 
2440 	return 0;
2441 }
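/*
 * Example (hypothetical paths): with SUPPORT_MULTIPLE_CHIPS defined and
 * chiprev 0, chipver_tag becomes "_4358_a0", so a caller-supplied
 * fw_path of ".../fw_bcmdhd" is rewritten to ".../fw_bcmdhd_4358_a0";
 * nv_path then receives the tag as possibly extended by the module-CIS
 * and board-rev checks above.
 */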
2442 
2443 static int concate_revision_bcm4359(dhd_bus_t *bus, char *fw_path, char *nv_path)
2444 {
2445 	uint32 chip_ver;
2446 	char chipver_tag[10] = {0, };
2447 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2448 	defined(SUPPORT_BCM4359_MIXED_MODULES)
2449 	int module_type = -1;
2450 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2451 
2452 	chip_ver = bus->sih->chiprev;
2453 	if (chip_ver == 4) {
2454 		DHD_ERROR(("----- CHIP 4359 B0 -----\n"));
2455 		strncat(chipver_tag, "_b0", strlen("_b0"));
2456 	} else if (chip_ver == 5) {
2457 		DHD_ERROR(("----- CHIP 4359 B1 -----\n"));
2458 		strncat(chipver_tag, "_b1", strlen("_b1"));
2459 	} else if (chip_ver == 9) {
2460 		DHD_ERROR(("----- CHIP 4359 C0 -----\n"));
2461 		strncat(chipver_tag, "_c0", strlen("_c0"));
2462 	} else {
2463 		DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver));
2464 		return -1;
2465 	}
2466 
2467 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2468 	defined(SUPPORT_BCM4359_MIXED_MODULES)
2469 	module_type =  dhd_check_module_b90();
2470 
2471 	switch (module_type) {
2472 		case BCM4359_MODULE_TYPE_B90B:
2473 			strcat(fw_path, chipver_tag);
2474 			break;
2475 		case BCM4359_MODULE_TYPE_B90S:
2476 		default:
2477 			/*
2478 			 * If the .cid.info file does not exist,
2479 			 * force-load the B90S FW for the initial MFG boot-up.
2480 			 */
2481 			if (chip_ver == 5) {
2482 				strncat(fw_path, "_b90s", strlen("_b90s"));
2483 			}
2484 			strcat(fw_path, chipver_tag);
2485 			strcat(nv_path, chipver_tag);
2486 			break;
2487 	}
2488 #else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2489 	strcat(fw_path, chipver_tag);
2490 	strcat(nv_path, chipver_tag);
2491 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2492 
2493 	return 0;
2494 }
2495 
2496 #if defined(USE_CID_CHECK)
2497 
2498 #define MAX_EXTENSION 20
2499 #define MODULE_BCM4361_INDEX	3
2500 #define CHIP_REV_A0	1
2501 #define CHIP_REV_A1	2
2502 #define CHIP_REV_B0	3
2503 #define CHIP_REV_B1	4
2504 #define CHIP_REV_B2	5
2505 #define CHIP_REV_C0	6
2506 #define BOARD_TYPE_EPA				0x080f
2507 #define BOARD_TYPE_IPA				0x0827
2508 #define BOARD_TYPE_IPA_OLD			0x081a
2509 #define DEFAULT_CIDINFO_FOR_EPA		"r00a_e000_a0_ePA"
2510 #define DEFAULT_CIDINFO_FOR_IPA		"r00a_e000_a0_iPA"
2511 #define DEFAULT_CIDINFO_FOR_A1		"r01a_e30a_a1"
2512 #define DEFAULT_CIDINFO_FOR_B0		"r01i_e32_b0"
2513 #define MAX_VID_LEN					8
2514 #define CIS_TUPLE_HDR_LEN		2
2515 #if defined(BCM4361_CHIP)
2516 #define CIS_TUPLE_START_ADDRESS		0x18011110
2517 #define CIS_TUPLE_END_ADDRESS		0x18011167
2518 #elif defined(BCM4375_CHIP)
2519 #define CIS_TUPLE_START_ADDRESS		0x18011120
2520 #define CIS_TUPLE_END_ADDRESS		0x18011177
2521 #endif /* defined(BCM4361_CHIP) */
2522 #define CIS_TUPLE_MAX_COUNT		(uint32)((CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\
2523 						+ 1) / sizeof(uint32))
2524 #define CIS_TUPLE_TAG_START			0x80
2525 #define CIS_TUPLE_TAG_VENDOR		0x81
2526 #define CIS_TUPLE_TAG_BOARDTYPE		0x1b
2527 #define CIS_TUPLE_TAG_LENGTH		1
2528 #define NVRAM_FEM_MURATA			"_murata"
2529 #define CID_FEM_MURATA				"_mur_"
2530 
2531 typedef struct cis_tuple_format {
2532 	uint8	id;
2533 	uint8	len;	/* total length of tag and data */
2534 	uint8	tag;
2535 	uint8	data[1];
2536 } cis_tuple_format_t;
2537 
2538 typedef struct {
2539 	char cid_ext[MAX_EXTENSION];
2540 	char nvram_ext[MAX_EXTENSION];
2541 	char fw_ext[MAX_EXTENSION];
2542 } naming_info_t;
2543 
2544 naming_info_t bcm4361_naming_table[] = {
2545 	{ {""}, {""}, {""} },
2546 	{ {"r00a_e000_a0_ePA"}, {"_a0_ePA"}, {"_a0_ePA"} },
2547 	{ {"r00a_e000_a0_iPA"}, {"_a0"}, {"_a1"} },
2548 	{ {"r01a_e30a_a1"}, {"_r01a_a1"}, {"_a1"} },
2549 	{ {"r02a_e30a_a1"}, {"_r02a_a1"}, {"_a1"} },
2550 	{ {"r02c_e30a_a1"}, {"_r02c_a1"}, {"_a1"} },
2551 	{ {"r01d_e31_b0"}, {"_r01d_b0"}, {"_b0"} },
2552 	{ {"r01f_e31_b0"}, {"_r01f_b0"}, {"_b0"} },
2553 	{ {"r02g_e31_b0"}, {"_r02g_b0"}, {"_b0"} },
2554 	{ {"r01h_e32_b0"}, {"_r01h_b0"}, {"_b0"} },
2555 	{ {"r01i_e32_b0"}, {"_r01i_b0"}, {"_b0"} },
2556 	{ {"r02j_e32_b0"}, {"_r02j_b0"}, {"_b0"} },
2557 	{ {"r012_1kl_a1"}, {"_r012_a1"}, {"_a1"} },
2558 	{ {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
2559 	{ {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
2560 	{ {"r014_1kl_b0"}, {"_r014_b0"}, {"_b0"} },
2561 	{ {"r015_1kl_b0"}, {"_r015_b0"}, {"_b0"} },
2562 	{ {"r020_1kl_b0"}, {"_r020_b0"}, {"_b0"} },
2563 	{ {"r021_1kl_b0"}, {"_r021_b0"}, {"_b0"} },
2564 	{ {"r022_1kl_b0"}, {"_r022_b0"}, {"_b0"} },
2565 	{ {"r023_1kl_b0"}, {"_r023_b0"}, {"_b0"} },
2566 	{ {"r024_1kl_b0"}, {"_r024_b0"}, {"_b0"} },
2567 	{ {"r030_1kl_b0"}, {"_r030_b0"}, {"_b0"} },
2568 	{ {"r031_1kl_b0"}, {"_r030_b0"}, {"_b0"} },	/* exceptional case : r31 -> r30 */
2569 	{ {"r032_1kl_b0"}, {"_r032_b0"}, {"_b0"} },
2570 	{ {"r033_1kl_b0"}, {"_r033_b0"}, {"_b0"} },
2571 	{ {"r034_1kl_b0"}, {"_r034_b0"}, {"_b0"} },
2572 	{ {"r02a_e32a_b2"}, {"_r02a_b2"}, {"_b2"} },
2573 	{ {"r02b_e32a_b2"}, {"_r02b_b2"}, {"_b2"} },
2574 	{ {"r020_1qw_b2"}, {"_r020_b2"}, {"_b2"} },
2575 	{ {"r021_1qw_b2"}, {"_r021_b2"}, {"_b2"} },
2576 	{ {"r022_1qw_b2"}, {"_r022_b2"}, {"_b2"} },
2577 	{ {"r031_1qw_b2"}, {"_r031_b2"}, {"_b2"} },
2578 	{ {"r032_1qw_b2"}, {"_r032_b2"}, {"_b2"} },
2579 	{ {"r041_1qw_b2"}, {"_r041_b2"}, {"_b2"} }
2580 };
2581 
2582 #define MODULE_BCM4375_INDEX	3
2583 
2584 naming_info_t bcm4375_naming_table[] = {
2585 	{ {""}, {""}, {""} },
2586 	{ {"e41_es11"}, {"_ES00_semco_b0"}, {"_b0"} },
2587 	{ {"e43_es33"}, {"_ES01_semco_b0"}, {"_b0"} },
2588 	{ {"e43_es34"}, {"_ES02_semco_b0"}, {"_b0"} },
2589 	{ {"e43_es35"}, {"_ES02_semco_b0"}, {"_b0"} },
2590 	{ {"e43_es36"}, {"_ES03_semco_b0"}, {"_b0"} },
2591 	{ {"e43_cs41"}, {"_CS00_semco_b1"}, {"_b1"} },
2592 	{ {"e43_cs51"}, {"_CS01_semco_b1"}, {"_b1"} },
2593 	{ {"e43_cs53"}, {"_CS01_semco_b1"}, {"_b1"} },
2594 	{ {"e43_cs61"}, {"_CS00_skyworks_b1"}, {"_b1"} },
2595 	{ {"1rh_es10"}, {"_1rh_es10_b0"}, {"_b0"} },
2596 	{ {"1rh_es11"}, {"_1rh_es11_b0"}, {"_b0"} },
2597 	{ {"1rh_es12"}, {"_1rh_es12_b0"}, {"_b0"} },
2598 	{ {"1rh_es13"}, {"_1rh_es13_b0"}, {"_b0"} },
2599 	{ {"1rh_es20"}, {"_1rh_es20_b0"}, {"_b0"} },
2600 	{ {"1rh_es32"}, {"_1rh_es32_b0"}, {"_b0"} },
2601 	{ {"1rh_es41"}, {"_1rh_es41_b1"}, {"_b1"} },
2602 	{ {"1rh_es42"}, {"_1rh_es42_b1"}, {"_b1"} },
2603 	{ {"1rh_es43"}, {"_1rh_es43_b1"}, {"_b1"} },
2604 	{ {"1rh_es44"}, {"_1rh_es44_b1"}, {"_b1"} }
2605 };
2606 
2607 static naming_info_t *
2608 dhd_find_naming_info(naming_info_t table[], int table_size, char *module_type)
2609 {
2610 	int index_found = 0, i = 0;
2611 
2612 	if (module_type && strlen(module_type) > 0) {
2613 		for (i = 1; i < table_size; i++) {
2614 			if (!strncmp(table[i].cid_ext, module_type, strlen(table[i].cid_ext))) {
2615 				index_found = i;
2616 				break;
2617 			}
2618 		}
2619 	}
2620 
2621 	DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));
2622 
2623 	return &table[index_found];
2624 }
2625 
2626 static naming_info_t *
2627 dhd_find_naming_info_by_cid(naming_info_t table[], int table_size,
2628 	char *cid_info)
2629 {
2630 	int index_found = 0, i = 0;
2631 	char *ptr;
2632 
2633 	/* truncate extension */
2634 	for (i = 1, ptr = cid_info; i < MODULE_BCM4361_INDEX && ptr; i++) {
2635 		ptr = bcmstrstr(ptr, "_");
2636 		if (ptr) {
2637 			ptr++;
2638 		}
2639 	}
2640 
2641 	for (i = 1; i < table_size && ptr; i++) {
2642 		if (!strncmp(table[i].cid_ext, ptr, strlen(table[i].cid_ext))) {
2643 			index_found = i;
2644 			break;
2645 		}
2646 	}
2647 
2648 	DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));
2649 
2650 	return &table[index_found];
2651 }
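/*
 * Worked example: with MODULE_BCM4361_INDEX = 3, the truncation loop
 * above skips past two '_' separators, so a cid_info such as
 * "semco_sky_r02a_e30a_a1" (hypothetical) leaves ptr at "r02a_e30a_a1",
 * which then matches that row of bcm4361_naming_table by prefix.
 */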
2652 
2653 static int
2654 dhd_parse_board_information_bcm(dhd_bus_t *bus, int *boardtype,
2655 	unsigned char *vid, int *vid_length)
2656 {
2657 	int boardtype_backplane_addr[] = {
2658 		0x18010324, /* OTP Control 1 */
2659 		0x18012618, /* PMU min resource mask */
2660 	};
2661 	int boardtype_backplane_data[] = {
2662 		0x00fa0000,
2663 		0x0e4fffff /* Keep on ARMHTAVAIL */
2664 	};
2665 	int int_val = 0, i = 0;
2666 	cis_tuple_format_t *tuple;
2667 	int totlen, len;
2668 	uint32 raw_data[CIS_TUPLE_MAX_COUNT];
2669 
2670 	for (i = 0; i < ARRAYSIZE(boardtype_backplane_addr); i++) {
2671 		/* Write new OTP and PMU configuration */
2672 		if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int),
2673 				&boardtype_backplane_data[i], FALSE) != BCME_OK) {
2674 			DHD_ERROR(("invalid size/addr combination\n"));
2675 			return BCME_ERROR;
2676 		}
2677 
2678 		if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int),
2679 				&int_val, TRUE) != BCME_OK) {
2680 			DHD_ERROR(("invalid size/addr combination\n"));
2681 			return BCME_ERROR;
2682 		}
2683 
2684 		DHD_INFO(("%s: boardtype_backplane_addr 0x%08x rdata 0x%04x\n",
2685 			__FUNCTION__, boardtype_backplane_addr[i], int_val));
2686 	}
2687 
2688 	/* read tuple raw data */
2689 	for (i = 0; i < CIS_TUPLE_MAX_COUNT; i++) {
2690 		if (si_backplane_access(bus->sih, CIS_TUPLE_START_ADDRESS + i * sizeof(uint32),
2691 				sizeof(uint32),	&raw_data[i], TRUE) != BCME_OK) {
2692 			break;
2693 		}
2694 	}
2695 
2696 	totlen = i * sizeof(uint32);
2697 	tuple = (cis_tuple_format_t *)raw_data;
2698 
2699 	/* check the first tuple has tag 'start' */
2700 	if (tuple->id != CIS_TUPLE_TAG_START) {
2701 		return BCME_ERROR;
2702 	}
2703 
2704 	*vid_length = *boardtype = 0;
2705 
2706 	/* find tagged parameter */
2707 	while ((totlen >= (tuple->len + CIS_TUPLE_HDR_LEN)) &&
2708 			(*vid_length == 0 || *boardtype == 0)) {
2709 		len = tuple->len;
2710 
2711 		if ((tuple->tag == CIS_TUPLE_TAG_VENDOR) &&
2712 				(totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
2713 			/* found VID */
2714 			memcpy(vid, tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2715 			*vid_length = tuple->len - CIS_TUPLE_TAG_LENGTH;
2716 			prhex("OTP VID", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2717 		}
2718 		else if ((tuple->tag == CIS_TUPLE_TAG_BOARDTYPE) &&
2719 				(totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
2720 			/* found boardtype */
2721 			*boardtype = (int)tuple->data[0];
2722 			prhex("OTP boardtype", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2723 		}
2724 
2725 		tuple = (cis_tuple_format_t*)((uint8*)tuple + (len + CIS_TUPLE_HDR_LEN));
2726 		totlen -= (len + CIS_TUPLE_HDR_LEN);
2727 	}
2728 
2729 	if (*vid_length <= 0 || *boardtype <= 0) {
2730 		DHD_ERROR(("failed to parse information (vid=%d, boardtype=%d)\n",
2731 			*vid_length, *boardtype));
2732 		return BCME_ERROR;
2733 	}
2734 
2735 	return BCME_OK;
2736 
2737 }
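/*
 * Illustrative tuple stream for the walk above (hypothetical bytes):
 *
 *   80 02 00 xx       id 0x80 (start), len 2: tag 0x00 + 1 data byte
 *   80 07 81 v0..v5   id 0x80, len 7: tag 0x81 (vendor) + 6-byte VID
 *   80 02 1b bt       id 0x80, len 2: tag 0x1b (boardtype) + 1 byte
 *
 * Each step advances by len + CIS_TUPLE_HDR_LEN (the id/len bytes), and
 * the stored VID length excludes the single tag byte (CIS_TUPLE_TAG_LENGTH).
 */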
2738 
2739 static naming_info_t *
2740 dhd_find_naming_info_by_chip_rev(naming_info_t table[], int table_size,
2741 	dhd_bus_t *bus, bool *is_murata_fem)
2742 {
2743 	int board_type = 0, chip_rev = 0, vid_length = 0;
2744 	unsigned char vid[MAX_VID_LEN];
2745 	naming_info_t *info = &table[0];
2746 	char *cid_info = NULL;
2747 
2748 	if (!bus || !bus->sih) {
2749 		DHD_ERROR(("%s:bus(%p) or bus->sih is NULL\n", __FUNCTION__, bus));
2750 		return NULL;
2751 	}
2752 	chip_rev = bus->sih->chiprev;
2753 
2754 	if (dhd_parse_board_information_bcm(bus, &board_type, vid, &vid_length)
2755 			!= BCME_OK) {
2756 		DHD_ERROR(("%s:failed to parse board information\n", __FUNCTION__));
2757 		return NULL;
2758 	}
2759 
2760 	DHD_INFO(("%s:chip version %d\n", __FUNCTION__, chip_rev));
2761 
2762 #if defined(BCM4361_CHIP)
2763 	/* Only the A0 chipset is handled as an exception */
2764 	if (chip_rev == CHIP_REV_A0) {
2765 		if (board_type == BOARD_TYPE_EPA) {
2766 			info = dhd_find_naming_info(table, table_size,
2767 				DEFAULT_CIDINFO_FOR_EPA);
2768 		} else if ((board_type == BOARD_TYPE_IPA) ||
2769 				(board_type == BOARD_TYPE_IPA_OLD)) {
2770 			info = dhd_find_naming_info(table, table_size,
2771 				DEFAULT_CIDINFO_FOR_IPA);
2772 		}
2773 	} else {
2774 		cid_info = dhd_get_cid_info(vid, vid_length);
2775 		if (cid_info) {
2776 			info = dhd_find_naming_info_by_cid(table, table_size, cid_info);
2777 			if (strstr(cid_info, CID_FEM_MURATA)) {
2778 				*is_murata_fem = TRUE;
2779 			}
2780 		}
2781 	}
2782 #else
2783 	cid_info = dhd_get_cid_info(vid, vid_length);
2784 	if (cid_info) {
2785 		info = dhd_find_naming_info_by_cid(table, table_size, cid_info);
2786 		if (strstr(cid_info, CID_FEM_MURATA)) {
2787 			*is_murata_fem = TRUE;
2788 		}
2789 	}
2790 #endif /* BCM4361_CHIP */
2791 
2792 	return info;
2793 }
2794 #endif /* USE_CID_CHECK */
2795 
2796 static int
2797 concate_revision_bcm4361(dhd_bus_t *bus, char *fw_path, char *nv_path)
2798 {
2799 	int ret = BCME_OK;
2800 #if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK)
2801 	char module_type[MAX_VNAME_LEN];
2802 	naming_info_t *info = NULL;
2803 	bool is_murata_fem = FALSE;
2804 
2805 	memset(module_type, 0, sizeof(module_type));
2806 
2807 	if (dhd_check_module_bcm(module_type,
2808 			MODULE_BCM4361_INDEX, &is_murata_fem) == BCME_OK) {
2809 		info = dhd_find_naming_info(bcm4361_naming_table,
2810 			ARRAYSIZE(bcm4361_naming_table), module_type);
2811 	} else {
2812 		/* in case the .cid.info file does not exist */
2813 		info = dhd_find_naming_info_by_chip_rev(bcm4361_naming_table,
2814 			ARRAYSIZE(bcm4361_naming_table), bus, &is_murata_fem);
2815 	}
2816 
2817 	if (bcmstrnstr(nv_path, PATH_MAX,  "_murata", 7)) {
2818 		is_murata_fem = FALSE;
2819 	}
2820 
2821 	if (info) {
2822 		if (is_murata_fem) {
2823 			strncat(nv_path, NVRAM_FEM_MURATA, strlen(NVRAM_FEM_MURATA));
2824 		}
2825 		strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
2826 		strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
2827 	} else {
2828 		DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__));
2829 		ret = BCME_ERROR;
2830 	}
2831 #else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
2832 	char chipver_tag[10] = {0, };
2833 
2834 	strcat(fw_path, chipver_tag);
2835 	strcat(nv_path, chipver_tag);
2836 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
2837 
2838 	return ret;
2839 }
2840 
2841 static int
2842 concate_revision_bcm4375(dhd_bus_t *bus, char *fw_path, char *nv_path)
2843 {
2844 	int ret = BCME_OK;
2845 #if defined(SUPPORT_BCM4375_MIXED_MODULES) && defined(USE_CID_CHECK)
2846 	char module_type[MAX_VNAME_LEN];
2847 	naming_info_t *info = NULL;
2848 	bool is_murata_fem = FALSE;
2849 
2850 	memset(module_type, 0, sizeof(module_type));
2851 
2852 	if (dhd_check_module_bcm(module_type,
2853 			MODULE_BCM4375_INDEX, &is_murata_fem) == BCME_OK) {
2854 		info = dhd_find_naming_info(bcm4375_naming_table,
2855 				ARRAYSIZE(bcm4375_naming_table), module_type);
2856 	} else {
2857 		/* in case the .cid.info file does not exist */
2858 		info = dhd_find_naming_info_by_chip_rev(bcm4375_naming_table,
2859 				ARRAYSIZE(bcm4375_naming_table), bus, &is_murata_fem);
2860 	}
2861 
2862 	if (info) {
2863 		strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
2864 		strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
2865 	} else {
2866 		DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__));
2867 		ret = BCME_ERROR;
2868 	}
2869 #else /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */
2870 	char chipver_tag[10] = {0, };
2871 
2872 	strcat(fw_path, chipver_tag);
2873 	strcat(nv_path, chipver_tag);
2874 #endif /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */
2875 
2876 	return ret;
2877 }
2878 
2879 int
2880 concate_revision(dhd_bus_t *bus, char *fw_path, char *nv_path)
2881 {
2882 	int res = 0;
2883 
2884 	if (!bus || !bus->sih) {
2885 		DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
2886 		return -1;
2887 	}
2888 
2889 	if (!fw_path || !nv_path) {
2890 		DHD_ERROR(("fw_path or nv_path is null.\n"));
2891 		return res;
2892 	}
2893 
2894 	switch (si_chipid(bus->sih)) {
2895 
2896 	case BCM43569_CHIP_ID:
2897 	case BCM4358_CHIP_ID:
2898 		res = concate_revision_bcm4358(bus, fw_path, nv_path);
2899 		break;
2900 	case BCM4355_CHIP_ID:
2901 	case BCM4359_CHIP_ID:
2902 		res = concate_revision_bcm4359(bus, fw_path, nv_path);
2903 		break;
2904 	case BCM4361_CHIP_ID:
2905 	case BCM4347_CHIP_ID:
2906 		res = concate_revision_bcm4361(bus, fw_path, nv_path);
2907 		break;
2908 	case BCM4375_CHIP_ID:
2909 		res = concate_revision_bcm4375(bus, fw_path, nv_path);
2910 		break;
2911 	default:
2912 		DHD_ERROR(("REVISION SPECIFIC feature is not required\n"));
2913 		return res;
2914 	}
2915 
2916 	return res;
2917 }
2918 #endif /* SUPPORT_MULTIPLE_REVISION */
2919 
2920 uint16
2921 dhd_get_chipid(dhd_pub_t *dhd)
2922 {
2923 	dhd_bus_t *bus = dhd->bus;
2924 
2925 	if (bus && bus->sih)
2926 		return (uint16)si_chipid(bus->sih);
2927 	else
2928 		return 0;
2929 }
2930 
2931 /**
2932  * Loads firmware given by caller supplied path and nvram image into PCIe dongle.
2933  *
2934  * BCM_REQUEST_FW specific :
2935  * Given the chip type, determines the file paths to use within /lib/firmware/brcm/ containing
2936  * firmware and nvm for that chip. If the download fails, retries the download with a different nvm file.
2937  *
2938  * BCMEMBEDIMAGE specific:
2939  * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
2940  * file will be used instead.
2941  *
2942  * @return BCME_OK on success
2943  */
2944 int
2945 dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
2946                           char *pfw_path, char *pnv_path)
2947 {
2948 	int ret;
2949 
2950 	bus->fw_path = pfw_path;
2951 	bus->nv_path = pnv_path;
2952 
2953 #if defined(SUPPORT_MULTIPLE_REVISION)
2954 	if (concate_revision(bus, bus->fw_path, bus->nv_path) != 0) {
2955 		DHD_ERROR(("%s: failed to concatenate revision\n",
2956 			__FUNCTION__));
2957 		return BCME_BADARG;
2958 	}
2959 #endif /* SUPPORT_MULTIPLE_REVISION */
2960 
2961 #if defined(DHD_BLOB_EXISTENCE_CHECK)
2962 	dhd_set_blob_support(bus->dhd, bus->fw_path);
2963 #endif /* DHD_BLOB_EXISTENCE_CHECK */
2964 
2965 	DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
2966 		__FUNCTION__, bus->fw_path, bus->nv_path));
2967 	dhdpcie_dump_resource(bus);
2968 
2969 	ret = dhdpcie_download_firmware(bus, osh);
2970 
2971 	return ret;
2972 }
2973 
2974 /**
2975  * Loads firmware given by 'bus->fw_path' into PCIe dongle.
2976  *
2977  * BCM_REQUEST_FW specific :
2978  * Given the chip type, determines the file paths to use within /lib/firmware/brcm/ containing
2979  * firmware and nvm for that chip. If the download fails, retries the download with a different nvm file.
2980  *
2981  * BCMEMBEDIMAGE specific:
2982  * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
2983  * file will be used instead.
2984  *
2985  * @return BCME_OK on success
2986  */
2987 static int
2988 dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
2989 {
2990 	int ret = 0;
2991 #if defined(BCM_REQUEST_FW)
2992 	uint chipid = bus->sih->chip;
2993 	uint revid = bus->sih->chiprev;
2994 	char fw_path[64] = "/lib/firmware/brcm/bcm";	/* path to firmware image */
2995 	char nv_path[64];		/* path to nvram vars file */
2996 	bus->fw_path = fw_path;
2997 	bus->nv_path = nv_path;
2998 	switch (chipid) {
2999 	case BCM43570_CHIP_ID:
3000 		bcmstrncat(fw_path, "43570", 5);
3001 		switch (revid) {
3002 		case 0:
3003 			bcmstrncat(fw_path, "a0", 2);
3004 			break;
3005 		case 2:
3006 			bcmstrncat(fw_path, "a2", 2);
3007 			break;
3008 		default:
3009 			DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__,
3010 			revid));
3011 			break;
3012 		}
3013 		break;
3014 	default:
3015 		DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__,
3016 		chipid));
3017 		return 0;
3018 	}
3019 	/* load board specific nvram file */
3020 	snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path);
3021 	/* load firmware */
3022 	snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path);
3023 #endif /* BCM_REQUEST_FW */
3024 
3025 	DHD_OS_WAKE_LOCK(bus->dhd);
3026 	ret = _dhdpcie_download_firmware(bus);
3027 
3028 	DHD_OS_WAKE_UNLOCK(bus->dhd);
3029 	return ret;
3030 } /* dhdpcie_download_firmware */
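/*
 * Worked example of the BCM_REQUEST_FW path above: for chipid
 * BCM43570_CHIP_ID rev 2, fw_path grows to "/lib/firmware/brcm/bcm43570a2",
 * so the snprintf() calls produce "/lib/firmware/brcm/bcm43570a2.nvm" and
 * "/lib/firmware/brcm/bcm43570a2-firmware.bin".
 */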
3031 
3032 #define DHD_MEMORY_SET_PATTERN 0xAA
3033 
3034 /**
3035  * Downloads a file containing firmware into dongle memory. In case of a .bea file, the DHD
3036  * is updated with the event logging partitions within that file as well.
3037  *
3038  * @param pfw_path    Path to .bin or .bea file
3039  */
3040 static int
3041 dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
3042 {
3043 	int bcmerror = BCME_ERROR;
3044 	int offset = 0;
3045 	int len = 0;
3046 	bool store_reset;
3047 	char *imgbuf = NULL;
3048 	uint8 *memblock = NULL, *memptr = NULL;
3049 	int offset_end = bus->ramsize;
3050 	uint32 file_size = 0, read_len = 0;
3051 	struct trx_header *trx_hdr;
3052 	bool trx_chk = TRUE;
3053 
3054 #if defined(DHD_FW_MEM_CORRUPTION)
3055 	if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
3056 		dhd_tcm_test_enable = TRUE;
3057 	} else {
3058 		dhd_tcm_test_enable = FALSE;
3059 	}
3060 #endif /* DHD_FW_MEM_CORRUPTION */
3061 	DHD_ERROR(("%s: dhd_tcm_test_enable %u\n", __FUNCTION__, dhd_tcm_test_enable));
3062 	/* TCM check */
3063 	if (dhd_tcm_test_enable && !dhd_bus_tcm_test(bus)) {
3064 		DHD_ERROR(("dhd_bus_tcm_test failed\n"));
3065 		bcmerror = BCME_ERROR;
3066 		goto err;
3067 	}
3068 	DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
3069 
3070 	/* Should succeed in opening image if it is actually given through registry
3071 	 * entry or in module param.
3072 	 */
3073 	imgbuf = dhd_os_open_image1(bus->dhd, pfw_path);
3074 	if (imgbuf == NULL) {
3075 		goto err;
3076 	}
3077 
3078 	file_size = dhd_os_get_image_size(imgbuf);
3079 	if (!file_size) {
3080 		DHD_ERROR(("%s: failed to get file size!\n", __FUNCTION__));
3081 		goto err;
3082 	}
3083 
3084 	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
3085 	if (memblock == NULL) {
3086 		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
3087 		bcmerror = BCME_NOMEM;
3088 		goto err;
3089 	}
3090 	if ((uint32)(uintptr)memblock % DHD_SDALIGN) {
3091 		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
3092 	}
3093 
3094 	/* check if CR4/CA7 */
3095 	store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
3096 			si_setcore(bus->sih, ARMCA7_CORE_ID, 0));
3097 	/* Download image with MEMBLOCK size */
3098 	while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) {
3099 		if (len < 0) {
3100 			DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
3101 			bcmerror = BCME_ERROR;
3102 			goto err;
3103 		}
3104 		read_len += len;
3105 		if (read_len > file_size) {
3106 			DHD_ERROR(("%s: WARNING! reading beyond EOF, len=%d; read_len=%u;"
3107 				" file_size=%u truncating len to %d \n", __FUNCTION__,
3108 				len, read_len, file_size, (len - (read_len - file_size))));
3109 			len -= (read_len - file_size);
3110 		}
3111 
3112 		/* if address is 0, store the reset instruction to be written in 0 */
3113 		if (store_reset) {
3114 			ASSERT(offset == 0);
3115 			bus->resetinstr = *(((uint32*)memptr));
3116 			/* Add start of RAM address to the address given by user */
3117 			offset += bus->dongle_ram_base;
3118 			offset_end += offset;
3119 			store_reset = FALSE;
3120 		}
3121 
3122 		/* Check for trx file */
3123 		if (trx_chk && (len >= sizeof(struct trx_header))) {
3124 			trx_chk = FALSE;
3125 			trx_hdr = (struct trx_header *)memptr;
3126 			if (trx_hdr->magic == TRX_MAGIC) {
3127 				/* CYW55560, we need to write TRX header at RAMSTART */
3128 				offset -= sizeof(struct trx_header);
3129 			}
3130 		}
3131 
3132 		bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
3133 		if (bcmerror) {
3134 			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
3135 				__FUNCTION__, bcmerror, len, offset));
3136 			goto err;
3137 		}
3138 		offset += MEMBLOCK;
3139 
3140 		if (offset >= offset_end) {
3141 			DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
3142 				__FUNCTION__, offset, offset_end));
3143 			bcmerror = BCME_ERROR;
3144 			goto err;
3145 		}
3146 
3147 		if (read_len >= file_size) {
3148 			break;
3149 		}
3150 	}
3151 err:
3152 	if (memblock) {
3153 		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
3154 	}
3155 
3156 	if (imgbuf) {
3157 		dhd_os_close_image1(bus->dhd, imgbuf);
3158 	}
3159 
3160 	return bcmerror;
3161 } /* dhdpcie_download_code_file */
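/*
 * Worked example of the DHD_SDALIGN fix-up above (illustrative value):
 * with DHD_SDALIGN = 32 and a memblock address ending in 0x1c,
 * 0x1c % 32 = 28, so memptr advances by 32 - 28 = 4 bytes to the next
 * 32-byte boundary; an already aligned memblock is left untouched.
 */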
3162 
3163 #ifdef CUSTOMER_HW4_DEBUG
3164 #define MIN_NVRAMVARS_SIZE 128
3165 #endif /* CUSTOMER_HW4_DEBUG */
3166 
3167 static int
3168 dhdpcie_download_nvram(struct dhd_bus *bus)
3169 {
3170 	int bcmerror = BCME_ERROR;
3171 	uint len;
3172 	char * memblock = NULL;
3173 	char *bufp;
3174 	char *pnv_path;
3175 	bool nvram_file_exists;
3176 	bool nvram_uefi_exists = FALSE;
3177 	bool local_alloc = FALSE;
3178 	pnv_path = bus->nv_path;
3179 
3180 	nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
3181 
3182 	/* First try UEFI */
3183 	len = MAX_NVRAMBUF_SIZE;
3184 	dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, (int *)&len);
3185 
3186 	/* If UEFI empty, then read from file system */
3187 	if ((len <= 0) || (memblock == NULL)) {
3188 
3189 		if (nvram_file_exists) {
3190 			len = MAX_NVRAMBUF_SIZE;
3191 			dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, (int *)&len);
3192 			if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) {
3193 				goto err;
3194 			}
3195 		}
3196 		else {
3197 			/* For SROM OTP no external file or UEFI required */
3198 			bcmerror = BCME_OK;
3199 		}
3200 	} else {
3201 		nvram_uefi_exists = TRUE;
3202 	}
3203 
3204 	DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len));
3205 
3206 	if (len > 0 && len <= MAX_NVRAMBUF_SIZE && memblock != NULL) {
3207 		bufp = (char *) memblock;
3208 
3209 		{
3210 			bufp[len] = 0;
3211 			if (nvram_uefi_exists || nvram_file_exists) {
3212 				len = process_nvram_vars(bufp, len);
3213 			}
3214 		}
3215 
3216 		DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len));
3217 #ifdef CUSTOMER_HW4_DEBUG
3218 		if (len < MIN_NVRAMVARS_SIZE) {
3219 			DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n",
3220 				__FUNCTION__));
3221 			bcmerror = BCME_ERROR;
3222 			goto err;
3223 		}
3224 #endif /* CUSTOMER_HW4_DEBUG */
3225 
3226 		if (len % 4) {
3227 			len += 4 - (len % 4);
3228 		}
3229 		bufp += len;
3230 		*bufp++ = 0;
3231 		if (len)
3232 			bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
3233 		if (bcmerror) {
3234 			DHD_ERROR(("%s: error downloading vars: %d\n",
3235 				__FUNCTION__, bcmerror));
3236 		}
3237 	}
3238 
3239 err:
3240 	if (memblock) {
3241 		if (local_alloc) {
3242 			MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
3243 		} else {
3244 			dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE);
3245 		}
3246 	}
3247 
3248 	return bcmerror;
3249 }
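
/*
 * Editor's note: a minimal standalone sketch (hypothetical helper name, not
 * part of this driver) of the fix-up done just above -- the processed NVRAM
 * blob is padded to a 4-byte boundary and a NUL byte is appended before it is
 * handed to dhdpcie_downloadvars() as "len + 1" bytes.
 */
#include <stddef.h>

static size_t pad_nvram_len(char *buf, size_t len)
{
	if (len % 4)			/* round up to a 4-byte boundary */
		len += 4 - (len % 4);
	buf[len] = '\0';		/* caller must have reserved len + 1 bytes */
	return len + 1;			/* total size given to the download call */
}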
3250 
3251 static int
3252 dhdpcie_ramsize_read_image(struct dhd_bus *bus, char *buf, int len)
3253 {
3254 	int bcmerror = BCME_ERROR;
3255 	char *imgbuf = NULL;
3256 
3257 	if (buf == NULL || len == 0)
3258 		goto err;
3259 
3260 	/* External image takes precedence if specified */
3261 	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
3262 		// opens and seeks to correct file offset:
3263 		imgbuf = dhd_os_open_image1(bus->dhd, bus->fw_path);
3264 		if (imgbuf == NULL) {
3265 			DHD_ERROR(("%s: Failed to open firmware file\n", __FUNCTION__));
3266 			goto err;
3267 		}
3268 
3269 		/* Read it */
3270 		if (len != dhd_os_get_image_block(buf, len, imgbuf)) {
3271 			DHD_ERROR(("%s: Failed to read %d bytes data\n", __FUNCTION__, len));
3272 			goto err;
3273 		}
3274 
3275 		bcmerror = BCME_OK;
3276 	}
3277 
3278 err:
3279 	if (imgbuf)
3280 		dhd_os_close_image1(bus->dhd, imgbuf);
3281 
3282 	return bcmerror;
3283 }
3284 
3285 /* The ramsize can be changed in the dongle image; for example, the 4365 chip shares sysmem
3286  * with the BMC, and how much sysmem belongs to CA7 can be chosen at dongle compile time.
3287  * So the DHD needs to detect this case and update the dongle RAMSIZE accordingly.
3288  */
3289 static void
3290 dhdpcie_ramsize_adj(struct dhd_bus *bus)
3291 {
3292 	int i, search_len = 0;
3293 	uint8 *memptr = NULL;
3294 	uint8 *ramsizeptr = NULL;
3295 	uint ramsizelen;
3296 	uint32 ramsize_ptr_ptr[] = {RAMSIZE_PTR_PTR_LIST};
3297 	hnd_ramsize_ptr_t ramsize_info;
3298 
3299 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3300 
3301 	/* Dongle RAMSIZE adjustment was already done. */
3302 	if (bus->ramsize_adjusted) {
3303 		return;
3304 	}
3305 
3306 	/* Success or failure, we don't want to be here
3307 	 * more than once.
3308 	 */
3309 	bus->ramsize_adjusted = TRUE;
3310 
3311 	/* Not handle if user restrict dongle ram size enabled */
3312 	/* Skip adjustment if the user has restricted the dongle RAM size */
3313 		DHD_ERROR(("%s: user restrict dongle ram size to %d.\n", __FUNCTION__,
3314 			dhd_dongle_memsize));
3315 		return;
3316 	}
3317 
3318 	/* Out immediately if no image to download */
3319 	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
3320 		DHD_ERROR(("%s: no firmware file\n", __FUNCTION__));
3321 		return;
3322 	}
3323 
3324 	/* Get maximum RAMSIZE info search length */
3325 	for (i = 0; ; i++) {
3326 		if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END)
3327 			break;
3328 
3329 		if (search_len < (int)ramsize_ptr_ptr[i])
3330 			search_len = (int)ramsize_ptr_ptr[i];
3331 	}
3332 
3333 	if (!search_len)
3334 		return;
3335 
3336 	search_len += sizeof(hnd_ramsize_ptr_t);
3337 
3338 	memptr = MALLOC(bus->dhd->osh, search_len);
3339 	if (memptr == NULL) {
3340 		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, search_len));
3341 		return;
3342 	}
3343 
3344 	/* External image takes precedence if specified */
3345 	if (dhdpcie_ramsize_read_image(bus, (char *)memptr, search_len) != BCME_OK) {
3346 		goto err;
3347 	}
3348 	else {
3349 		ramsizeptr = memptr;
3350 		ramsizelen = search_len;
3351 	}
3352 
3353 	if (ramsizeptr) {
3354 		/* Check Magic */
3355 		for (i = 0; ; i++) {
3356 			if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END)
3357 				break;
3358 
3359 			if (ramsize_ptr_ptr[i] + sizeof(hnd_ramsize_ptr_t) > ramsizelen)
3360 				continue;
3361 
3362 			memcpy((char *)&ramsize_info, ramsizeptr + ramsize_ptr_ptr[i],
3363 				sizeof(hnd_ramsize_ptr_t));
3364 
3365 			if (ramsize_info.magic == HTOL32(HND_RAMSIZE_PTR_MAGIC)) {
3366 				bus->orig_ramsize = LTOH32(ramsize_info.ram_size);
3367 				bus->ramsize = LTOH32(ramsize_info.ram_size);
3368 				DHD_ERROR(("%s: Adjust dongle RAMSIZE to 0x%x\n", __FUNCTION__,
3369 					bus->ramsize));
3370 				break;
3371 			}
3372 		}
3373 	}
3374 
3375 err:
3376 	if (memptr)
3377 		MFREE(bus->dhd->osh, memptr, search_len);
3378 
3379 	return;
3380 } /* dhdpcie_ramsize_adj */
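
/*
 * Editor's note: standalone sketch of the magic scan above. The start of the
 * image is probed at a fixed list of candidate offsets for a magic-tagged
 * {magic, ram_size} record; a hit overrides the default RAM size. All names
 * below are hypothetical stand-ins for RAMSIZE_PTR_PTR_LIST /
 * HND_RAMSIZE_PTR_MAGIC / hnd_ramsize_ptr_t; a little-endian host is assumed.
 */
#include <stdint.h>
#include <string.h>

struct ramsize_rec { uint32_t magic; uint32_t ram_size; };

static int find_ramsize(const uint8_t *img, size_t img_len,
                        const uint32_t *offsets, size_t n_off,
                        uint32_t magic, uint32_t *ram_size_out)
{
	struct ramsize_rec rec;
	size_t i;

	for (i = 0; i < n_off; i++) {
		if (offsets[i] + sizeof(rec) > img_len)
			continue;	/* candidate lies past the bytes read */
		memcpy(&rec, img + offsets[i], sizeof(rec));
		if (rec.magic == magic) {
			*ram_size_out = rec.ram_size;
			return 0;	/* found: adjust the RAM size */
		}
	}
	return -1;			/* keep the compiled-in default */
}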
3381 
3382 /**
3383  * Downloads firmware file given by 'bus->fw_path' into PCIe dongle
3384  *
3385  * BCMEMBEDIMAGE specific:
3386  * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
3387  * file will be used instead.
3388  *
3389  */
3390 static int
3391 _dhdpcie_download_firmware(struct dhd_bus *bus)
3392 {
3393 	int bcmerror = -1;
3394 
3395 	bool embed = FALSE;	/* download embedded firmware */
3396 	bool dlok = FALSE;	/* download firmware succeeded */
3397 
3398 	/* Out immediately if no image to download */
3399 	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
3400 		DHD_ERROR(("%s: no firmware file\n", __FUNCTION__));
3401 		return 0;
3402 	}
3403 	/* Adjust ram size */
3404 	dhdpcie_ramsize_adj(bus);
3405 
3406 	/* Keep arm in reset */
3407 	if (dhdpcie_bus_download_state(bus, TRUE)) {
3408 		DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
3409 		goto err;
3410 	}
3411 
3412 	/* External image takes precedence if specified */
3413 	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
3414 		if (dhdpcie_download_code_file(bus, bus->fw_path)) {
3415 			DHD_ERROR(("%s:%d dongle image file download failed\n", __FUNCTION__,
3416 				__LINE__));
3417 			goto err;
3418 		} else {
3419 			embed = FALSE;
3420 			dlok = TRUE;
3421 		}
3422 	}
3423 
3424 	BCM_REFERENCE(embed);
3425 	if (!dlok) {
3426 		DHD_ERROR(("%s:%d dongle image download failed\n", __FUNCTION__, __LINE__));
3427 		goto err;
3428 	}
3429 
3430 	/* EXAMPLE: nvram_array */
3431 	/* If a valid nvram_array is specified as above, it can be passed down to the dongle */
3432 	/* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
3433 
3434 	/* External nvram takes precedence if specified */
3435 	if (dhdpcie_download_nvram(bus)) {
3436 		DHD_ERROR(("%s:%d dongle nvram file download failed\n", __FUNCTION__, __LINE__));
3437 		goto err;
3438 	}
3439 
3440 	/* Take arm out of reset */
3441 	if (dhdpcie_bus_download_state(bus, FALSE)) {
3442 		DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
3443 		goto err;
3444 	}
3445 
3446 	bcmerror = 0;
3447 
3448 err:
3449 	return bcmerror;
3450 } /* _dhdpcie_download_firmware */
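
/*
 * Editor's note: the ordering above is the essential contract -- the ARM core
 * stays in reset for the whole image + NVRAM write and is released only after
 * both are in place. A hedged sketch of that sequence using hypothetical
 * function pointers (this struct does not exist in the driver):
 */
struct fw_dl_ops {
	int (*hold_arm_reset)(void *bus);
	int (*write_image)(void *bus);
	int (*write_nvram)(void *bus);
	int (*release_arm_reset)(void *bus);
};

static int fw_download_sequence(void *bus, const struct fw_dl_ops *ops)
{
	if (ops->hold_arm_reset(bus))		/* 1: keep the CPU quiescent */
		return -1;
	if (ops->write_image(bus))		/* 2: code/data into dongle RAM */
		return -1;
	if (ops->write_nvram(bus))		/* 3: vars at the end of RAM */
		return -1;
	return ops->release_arm_reset(bus);	/* 4: CPU boots the new image */
}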
3451 
3452 static int
3453 dhdpcie_bus_readconsole(dhd_bus_t *bus)
3454 {
3455 	dhd_console_t *c = &bus->console;
3456 	uint8 line[CONSOLE_LINE_MAX], ch;
3457 	uint32 n, idx, addr;
3458 	int rv;
3459 	uint readlen = 0;
3460 	uint i = 0;
3461 
3462 	if (!DHD_FWLOG_ON())
3463 		return 0;
3464 
3465 	/* Don't do anything until FWREADY updates console address */
3466 	if (bus->console_addr == 0)
3467 		return -1;
3468 
3469 	/* Read console log struct */
3470 	addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
3471 
3472 	/* Check if console log struct addr has changed */
3473 	/* Save the address (local copy) */
3474 	if (c->log_addr != addr) {
3475 		/* Reset last index pointer */
3476 		c->last = 0;
3477 		/* Re-allocate memory if console address changes */
3478 		if (c->buf) {
3479 			MFREE(bus->dhd->osh, c->buf, c->bufsize);
3480 			c->buf = NULL;
3481 		}
3482 		/* Save new console address */
3483 		c->log_addr = addr;
3484 	}
3485 
3486 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
3487 		return rv;
3488 
3489 	/* Allocate console buffer (one time only) */
3490 	if (c->buf == NULL) {
3491 		c->bufsize = ltoh32(c->log.buf_size);
3492 		if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
3493 			return BCME_NOMEM;
3494 		DHD_ERROR(("conlog: bufsize=0x%x\n", c->bufsize));
3495 	}
3496 	idx = ltoh32(c->log.idx);
3497 
3498 	/* Protect against corrupt value */
3499 	if (idx > c->bufsize)
3500 		return BCME_ERROR;
3501 
3502 	/* Skip reading the console buffer if the index pointer has not moved */
3503 	if (idx == c->last)
3504 		return BCME_OK;
3505 
3506 	DHD_ERROR(("conlog: addr=0x%x, idx=0x%x, last=0x%x \n", c->log.buf,
3507 	   idx, c->last));
3508 
3509 	/* Read the console buffer data to a local buffer */
3510 	/* Optimize by reading only the portion of the buffer that is needed,
3511 	 * but it is important to handle wrap-around.
3512 	 */
3513 	addr = ltoh32(c->log.buf);
3514 
3515 	/* wrap around case - write ptr < read ptr */
3516 	if (idx < c->last) {
3517 		/* from read ptr to end of buffer */
3518 		readlen = c->bufsize - c->last;
3519 		if ((rv = dhdpcie_bus_membytes(bus, FALSE,
3520 				addr + c->last, c->buf, readlen)) < 0) {
3521 			DHD_ERROR(("conlog: read error[1] ! \n"));
3522 			return rv;
3523 		}
3524 		/* from beginning of buffer to write ptr */
3525 		if ((rv = dhdpcie_bus_membytes(bus, FALSE,
3526 				addr, c->buf + readlen,
3527 				idx)) < 0) {
3528 			DHD_ERROR(("conlog: read error[2] ! \n"));
3529 			return rv;
3530 		}
3531 		readlen += idx;
3532 	} else {
3533 		/* non-wraparound case, write ptr > read ptr */
3534 		readlen = (uint)idx - c->last;
3535 		if ((rv = dhdpcie_bus_membytes(bus, FALSE,
3536 				addr + c->last, c->buf, readlen)) < 0) {
3537 			DHD_ERROR(("conlog: read error[3] ! \n"));
3538 			return rv;
3539 		}
3540 	}
3541 	/* update read ptr */
3542 	c->last = idx;
3543 
3544 	/* now output the read data from the local buffer to the host console */
3545 	while (i < readlen) {
3546 		for (n = 0; n < CONSOLE_LINE_MAX - 2 && i < readlen; n++) {
3547 			ch = c->buf[i];
3548 			++i;
3549 			if (ch == '\n')
3550 				break;
3551 			line[n] = ch;
3552 		}
3553 
3554 		if (n > 0) {
3555 			if (line[n - 1] == '\r')
3556 				n--;
3557 			line[n] = 0;
3558 			DHD_FWLOG(("CONSOLE: %s\n", line));
3559 		}
3560 	}
3561 
3562 	return BCME_OK;
3563 
3564 } /* dhdpcie_bus_readconsole */
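
/*
 * Editor's note: standalone sketch of the wrap-around handling above. When
 * the dongle's write index ("idx") is behind the host's last read index, the
 * valid bytes are split into two segments: [last, bufsize) followed by
 * [0, idx). Hypothetical names; memcpy() stands in for the backplane read
 * done by dhdpcie_bus_membytes().
 */
#include <stddef.h>
#include <string.h>

static size_t ring_read(const char *ring, size_t bufsize,
                        size_t last, size_t idx, char *out)
{
	size_t n;

	if (idx < last) {			/* wrapped */
		n = bufsize - last;		/* tail: read ptr to end */
		memcpy(out, ring + last, n);
		memcpy(out + n, ring, idx);	/* head: start to write ptr */
		n += idx;
	} else {				/* contiguous */
		n = idx - last;
		memcpy(out, ring + last, n);
	}
	return n;				/* caller then sets last = idx */
}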
3565 
3566 void
3567 dhd_bus_dump_console_buffer(dhd_bus_t *bus)
3568 {
3569 	uint32 n, i;
3570 	uint32 addr;
3571 	char *console_buffer = NULL;
3572 	uint32 console_ptr, console_size, console_index;
3573 	uint8 line[CONSOLE_LINE_MAX], ch;
3574 	int rv;
3575 
3576 	DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__));
3577 
3578 	if (bus->is_linkdown) {
3579 		DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n", __FUNCTION__));
3580 		return;
3581 	}
3582 
3583 	addr =	bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
3584 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
3585 		(uint8 *)&console_ptr, sizeof(console_ptr))) < 0) {
3586 		goto exit;
3587 	}
3588 
3589 	addr =	bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
3590 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
3591 		(uint8 *)&console_size, sizeof(console_size))) < 0) {
3592 		goto exit;
3593 	}
3594 
3595 	addr =	bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
3596 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
3597 		(uint8 *)&console_index, sizeof(console_index))) < 0) {
3598 		goto exit;
3599 	}
3600 
3601 	console_ptr = ltoh32(console_ptr);
3602 	console_size = ltoh32(console_size);
3603 	console_index = ltoh32(console_index);
3604 
3605 	if (console_size > CONSOLE_BUFFER_MAX ||
3606 		!(console_buffer = MALLOC(bus->dhd->osh, console_size))) {
3607 		goto exit;
3608 	}
3609 
3610 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
3611 		(uint8 *)console_buffer, console_size)) < 0) {
3612 		goto exit;
3613 	}
3614 
3615 	for (i = 0, n = 0; i < console_size; i += n + 1) {
3616 		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
3617 			ch = console_buffer[(console_index + i + n) % console_size];
3618 			if (ch == '\n')
3619 				break;
3620 			line[n] = ch;
3621 		}
3622 
3623 		if (n > 0) {
3624 			if (line[n - 1] == '\r')
3625 				n--;
3626 			line[n] = 0;
3627 			/* Don't use DHD_ERROR macro since we print
3628 			 * a lot of information quickly. The macro
3629 			 * will truncate a lot of the printfs
3630 			 */
3631 
3632 			DHD_FWLOG(("CONSOLE: %s\n", line));
3633 		}
3634 	}
3635 
3636 exit:
3637 	if (console_buffer)
3638 		MFREE(bus->dhd->osh, console_buffer, console_size);
3639 	return;
3640 }
3641 
3642 /**
3643  * Checks whether the dongle has trapped or asserted; if so, collects trap/assert info and console output, and schedules dump/recovery handling.
3644  *
3645  * @return BCME_OK on success
3646  */
3647 static int
3648 dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
3649 {
3650 	int bcmerror = 0;
3651 	uint msize = 512;
3652 	char *mbuffer = NULL;
3653 	uint maxstrlen = 256;
3654 	char *str = NULL;
3655 	pciedev_shared_t *local_pciedev_shared = bus->pcie_sh;
3656 	struct bcmstrbuf strbuf;
3657 	unsigned long flags;
3658 	bool dongle_trap_occured = FALSE;
3659 
3660 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3661 
3662 	if (DHD_NOCHECKDIED_ON()) {
3663 		return 0;
3664 	}
3665 
3666 	if (data == NULL) {
3667 		/*
3668 		 * Called after an rx ctrl timeout, so "data" is NULL;
3669 		 * allocate memory to trace the trap or assert.
3670 		 */
3671 		size = msize;
3672 		mbuffer = data = MALLOC(bus->dhd->osh, msize);
3673 
3674 		if (mbuffer == NULL) {
3675 			DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
3676 			bcmerror = BCME_NOMEM;
3677 			goto done2;
3678 		}
3679 	}
3680 
3681 	if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
3682 		DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
3683 		bcmerror = BCME_NOMEM;
3684 		goto done2;
3685 	}
3686 	DHD_GENERAL_LOCK(bus->dhd, flags);
3687 	DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd);
3688 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
3689 
3690 	if (MULTIBP_ENAB(bus->sih)) {
3691 		dhd_bus_pcie_pwr_req(bus);
3692 	}
3693 	if ((bcmerror = dhdpcie_readshared(bus)) < 0) {
3694 		goto done1;
3695 	}
3696 
3697 	bcm_binit(&strbuf, data, size);
3698 
3699 	bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address  : 0x%08X\n",
3700 	            local_pciedev_shared->msgtrace_addr, local_pciedev_shared->console_addr);
3701 
3702 	if ((local_pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
3703 		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
3704 		 * (Avoids conflict with real asserts for programmatic parsing of output.)
3705 		 */
3706 		bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
3707 	}
3708 
3709 	if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) {
3710 		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
3711 		 * (Avoids conflict with real asserts for programmatic parsing of output.)
3712 		 */
3713 		bcm_bprintf(&strbuf, "No trap%s in dongle",
3714 		          (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT)
3715 		          ?"/assrt" :"");
3716 	} else {
3717 		if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) {
3718 			/* Download assert */
3719 			bcm_bprintf(&strbuf, "Dongle assert");
3720 			if (bus->pcie_sh->assert_exp_addr != 0) {
3721 				str[0] = '\0';
3722 				if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
3723 					bus->pcie_sh->assert_exp_addr,
3724 					(uint8 *)str, maxstrlen)) < 0) {
3725 					goto done1;
3726 				}
3727 
3728 				str[maxstrlen - 1] = '\0';
3729 				bcm_bprintf(&strbuf, " expr \"%s\"", str);
3730 			}
3731 
3732 			if (bus->pcie_sh->assert_file_addr != 0) {
3733 				str[0] = '\0';
3734 				if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
3735 					bus->pcie_sh->assert_file_addr,
3736 					(uint8 *)str, maxstrlen)) < 0) {
3737 					goto done1;
3738 				}
3739 
3740 				str[maxstrlen - 1] = '\0';
3741 				bcm_bprintf(&strbuf, " file \"%s\"", str);
3742 			}
3743 
3744 			bcm_bprintf(&strbuf, " line %d ",  bus->pcie_sh->assert_line);
3745 		}
3746 
3747 		if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
3748 			trap_t *tr = &bus->dhd->last_trap_info;
3749 			dongle_trap_occured = TRUE;
3750 			if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
3751 				bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) {
3752 				bus->dhd->dongle_trap_occured = TRUE;
3753 				goto done1;
3754 			}
3755 			dhd_bus_dump_trap_info(bus, &strbuf);
3756 		}
3757 	}
3758 
3759 	if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
3760 		DHD_FWLOG(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
3761 
3762 		dhd_bus_dump_console_buffer(bus);
3763 		dhd_prot_debug_info_print(bus->dhd);
3764 
3765 #if defined(DHD_FW_COREDUMP)
3766 		/* save core dump or write to a file */
3767 		if (bus->dhd->memdump_enabled) {
3768 #ifdef DHD_SSSR_DUMP
3769 			bus->dhd->collect_sssr = TRUE;
3770 #endif /* DHD_SSSR_DUMP */
3771 			bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
3772 			dhdpcie_mem_dump(bus);
3773 		}
3774 #endif /* DHD_FW_COREDUMP */
3775 
3776 		/* set the trap occurred flag only after all the memdump,
3777 		 * logdump and sssr dump collection has been scheduled
3778 		 */
3779 		if (dongle_trap_occured) {
3780 			bus->dhd->dongle_trap_occured = TRUE;
3781 		}
3782 
3783 		/* wake up IOCTL wait event */
3784 		dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
3785 
3786 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
3787 		copy_hang_info_trap(bus->dhd);
3788 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
3789 		dhd_schedule_reset(bus->dhd);
3790 
3791 	}
3792 
3793 done1:
3794 	if (MULTIBP_ENAB(bus->sih)) {
3795 		dhd_bus_pcie_pwr_req_clear(bus);
3796 	}
3797 
3798 	DHD_GENERAL_LOCK(bus->dhd, flags);
3799 	DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd);
3800 	dhd_os_busbusy_wake(bus->dhd);
3801 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
3802 done2:
3803 	if (mbuffer)
3804 		MFREE(bus->dhd->osh, mbuffer, msize);
3805 	if (str)
3806 		MFREE(bus->dhd->osh, str, maxstrlen);
3807 
3808 	return bcmerror;
3809 } /* dhdpcie_checkdied */
3810 
3811 /* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
3812 void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf)
3813 {
3814 	int ret = 0;
3815 	int size; /* Full mem size */
3816 	int start; /* Start address */
3817 	int read_size = 0; /* Read size of each iteration */
3818 	uint8 *databuf = buf;
3819 
3820 	if (bus == NULL) {
3821 		return;
3822 	}
3823 
3824 	start = bus->dongle_ram_base;
3825 	read_size = 4;
3826 	/* check for dead bus */
3827 	{
3828 		uint test_word = 0;
3829 		ret = dhdpcie_bus_membytes(bus, FALSE, start, (uint8*)&test_word, read_size);
3830 		/* if read error or bus timeout */
3831 		if (ret || (test_word == 0xFFFFFFFF)) {
3832 			return;
3833 		}
3834 	}
3835 
3836 	/* Get full mem size */
3837 	size = bus->ramsize;
3838 	/* Read mem content */
3839 	while (size)
3840 	{
3841 		read_size = MIN(MEMBLOCK, size);
3842 		if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) {
3843 			return;
3844 		}
3845 
3846 		/* Decrement size and increment start address */
3847 		size -= read_size;
3848 		start += read_size;
3849 		databuf += read_size;
3850 	}
3851 	bus->dhd->soc_ram = buf;
3852 	bus->dhd->soc_ram_length = bus->ramsize;
3853 	return;
3854 }
3855 
3856 #if defined(DHD_FW_COREDUMP)
3857 static int
3858 dhdpcie_get_mem_dump(dhd_bus_t *bus)
3859 {
3860 	int ret = BCME_OK;
3861 	int size = 0;
3862 	int start = 0;
3863 	int read_size = 0; /* Read size of each iteration */
3864 	uint8 *p_buf = NULL, *databuf = NULL;
3865 
3866 	if (!bus) {
3867 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
3868 		return BCME_ERROR;
3869 	}
3870 
3871 	if (!bus->dhd) {
3872 		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
3873 		return BCME_ERROR;
3874 	}
3875 
3876 	size = bus->ramsize; /* Full mem size */
3877 	start = bus->dongle_ram_base; /* Start address */
3878 
3879 	/* Get full mem size */
3880 	p_buf = dhd_get_fwdump_buf(bus->dhd, size);
3881 	if (!p_buf) {
3882 		DHD_ERROR(("%s: Out of memory (%d bytes)\n",
3883 			__FUNCTION__, size));
3884 		return BCME_ERROR;
3885 	}
3886 
3887 	/* Read mem content */
3888 	DHD_TRACE_HW4(("Dump dongle memory\n"));
3889 	databuf = p_buf;
3890 	while (size > 0) {
3891 		read_size = MIN(MEMBLOCK, size);
3892 		ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size);
3893 		if (ret) {
3894 			DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
3895 #ifdef DHD_DEBUG_UART
3896 			bus->dhd->memdump_success = FALSE;
3897 #endif	/* DHD_DEBUG_UART */
3898 			break;
3899 		}
3900 		DHD_TRACE(("."));
3901 
3902 		/* Decrement size and increment start address */
3903 		size -= read_size;
3904 		start += read_size;
3905 		databuf += read_size;
3906 	}
3907 
3908 	return ret;
3909 }
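
/*
 * Editor's note: the MIN(MEMBLOCK, size) loop above is the standard bounded
 * chunked copy. A standalone sketch, with memcpy() standing in for one
 * backplane transaction and hypothetical names throughout:
 */
#include <stddef.h>
#include <string.h>

#define CHUNK 2048	/* mirrors MEMBLOCK in this file */

static void chunked_copy(char *dst, const char *src, size_t size)
{
	while (size > 0) {
		size_t n = (size < CHUNK) ? size : CHUNK;
		memcpy(dst, src, n);	/* one bounded bus transaction */
		size -= n;
		src += n;
		dst += n;
	}
}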
3910 
3911 static int
3912 dhdpcie_mem_dump(dhd_bus_t *bus)
3913 {
3914 	dhd_pub_t *dhdp;
3915 	int ret;
3916 
3917 #ifdef EXYNOS_PCIE_DEBUG
3918 	exynos_pcie_register_dump(1);
3919 #endif /* EXYNOS_PCIE_DEBUG */
3920 
3921 #ifdef SUPPORT_LINKDOWN_RECOVERY
3922 	if (bus->is_linkdown) {
3923 		DHD_ERROR(("%s: PCIe link is down so skip\n", __FUNCTION__));
3924 		/* panic only for DUMP_MEMFILE_BUGON */
3925 		ASSERT(bus->dhd->memdump_enabled != DUMP_MEMFILE_BUGON);
3926 		return BCME_ERROR;
3927 	}
3928 #endif /* SUPPORT_LINKDOWN_RECOVERY */
3929 
3930 	dhdp = bus->dhd;
3931 	if (!dhdp) {
3932 		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
3933 		return BCME_ERROR;
3934 	}
3935 
3936 	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
3937 		DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
3938 		return BCME_ERROR;
3939 	}
3940 
3941 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
3942 	if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0)
3943 		return BCME_ERROR;
3944 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
3945 
3946 	ret = dhdpcie_get_mem_dump(bus);
3947 	if (ret) {
3948 		DHD_ERROR(("%s: failed to get mem dump, err=%d\n",
3949 			__FUNCTION__, ret));
3950 		return ret;
3951 	}
3952 
3953 	dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
3954 	/* buf (i.e. soc_ram) is freed later via dhd_free()/dhd_clear() */
3955 
3956 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
3957 	pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
3958 	pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
3959 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
3960 
3961 	return ret;
3962 }
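
/*
 * Editor's note: hedged sketch of the DHD_PCIE_NATIVE_RUNTIMEPM bracket used
 * above, following the stock Linux runtime-PM pattern: take a usage reference
 * (resuming the device if needed) around the access, then drop it with
 * autosuspend. "dev" and "do_io" are placeholders, not driver symbols.
 */
#include <linux/errno.h>
#include <linux/pm_runtime.h>

static int pm_bracketed_io(struct device *dev, int (*do_io)(struct device *))
{
	int ret;

	if (pm_runtime_get_sync(dev) < 0) {	/* resume + take reference */
		pm_runtime_put_noidle(dev);	/* balance the ref on failure */
		return -EIO;
	}
	ret = do_io(dev);
	pm_runtime_mark_last_busy(dev);		/* restart autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop the reference lazily */
	return ret;
}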
3963 
3964 int
3965 dhd_bus_get_mem_dump(dhd_pub_t *dhdp)
3966 {
3967 	if (!dhdp) {
3968 		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
3969 		return BCME_ERROR;
3970 	}
3971 
3972 	return dhdpcie_get_mem_dump(dhdp->bus);
3973 }
3974 
3975 int
3976 dhd_bus_mem_dump(dhd_pub_t *dhdp)
3977 {
3978 	dhd_bus_t *bus = dhdp->bus;
3979 	int ret = BCME_ERROR;
3980 
3981 	if (dhdp->busstate == DHD_BUS_DOWN) {
3982 		DHD_ERROR(("%s bus is down\n", __FUNCTION__));
3983 		return BCME_ERROR;
3984 	}
3985 
3986 	/* Try to resume if already suspended or suspend in progress */
3987 #ifdef DHD_PCIE_RUNTIMEPM
3988 	dhdpcie_runtime_bus_wake(dhdp, CAN_SLEEP(), __builtin_return_address(0));
3989 #endif /* DHD_PCIE_RUNTIMEPM */
3990 
3991 	/* Skip if still in suspended or suspend in progress */
3992 	if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp)) {
3993 		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
3994 			__FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
3995 		return BCME_ERROR;
3996 	}
3997 
3998 	DHD_OS_WAKE_LOCK(dhdp);
3999 	ret = dhdpcie_mem_dump(bus);
4000 	DHD_OS_WAKE_UNLOCK(dhdp);
4001 	return ret;
4002 }
4003 #endif	/* DHD_FW_COREDUMP */
4004 
4005 int
4006 dhd_socram_dump(dhd_bus_t *bus)
4007 {
4008 #if defined(DHD_FW_COREDUMP)
4009 	DHD_OS_WAKE_LOCK(bus->dhd);
4010 	dhd_bus_mem_dump(bus->dhd);
4011 	DHD_OS_WAKE_UNLOCK(bus->dhd);
4012 	return 0;
4013 #else
4014 	return -1;
4015 #endif // endif
4016 }
4017 
4018 /**
4019  * Transfers bytes between host and dongle (direction per 'write') using PIO mode.
4020  * Parameter 'address' is a backplane address.
4021  */
4022 static int
4023 dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size)
4024 {
4025 	uint dsize;
4026 	int detect_endian_flag = 0x01;
4027 	bool little_endian;
4028 
4029 	if (write && bus->is_linkdown) {
4030 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
4031 		return BCME_ERROR;
4032 	}
4033 
4034 	if (MULTIBP_ENAB(bus->sih)) {
4035 		dhd_bus_pcie_pwr_req(bus);
4036 	}
4037 	/* Detect endianness. */
4038 	little_endian = *(char *)&detect_endian_flag;
4039 
4040 	/* In remap mode, adjust address beyond socram and redirect
4041 	 * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
4042 	 * is not backplane accessible
4043 	 */
4044 
4045 	/* Determine initial transfer parameters */
4046 #ifdef DHD_SUPPORT_64BIT
4047 	dsize = sizeof(uint64);
4048 #else /* !DHD_SUPPORT_64BIT */
4049 	dsize = sizeof(uint32);
4050 #endif /* DHD_SUPPORT_64BIT */
4051 
4052 	/* Do the transfer(s) */
4053 	if (write) {
4054 		while (size) {
4055 #ifdef DHD_SUPPORT_64BIT
4056 			if (size >= sizeof(uint64) && little_endian &&	!(address % 8)) {
4057 				dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
4058 			}
4059 #else /* !DHD_SUPPORT_64BIT */
4060 			if (size >= sizeof(uint32) && little_endian &&	!(address % 4)) {
4061 				dhdpcie_bus_wtcm32(bus, address, *((uint32*)data));
4062 			}
4063 #endif /* DHD_SUPPORT_64BIT */
4064 			else {
4065 				dsize = sizeof(uint8);
4066 				dhdpcie_bus_wtcm8(bus, address, *data);
4067 			}
4068 
4069 			/* Adjust for next transfer (if any) */
4070 			if ((size -= dsize)) {
4071 				data += dsize;
4072 				address += dsize;
4073 			}
4074 		}
4075 	} else {
4076 		while (size) {
4077 #ifdef DHD_SUPPORT_64BIT
4078 			if (size >= sizeof(uint64) && little_endian &&	!(address % 8))
4079 			{
4080 				*(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
4081 			}
4082 #else /* !DHD_SUPPORT_64BIT */
4083 			if (size >= sizeof(uint32) && little_endian &&	!(address % 4))
4084 			{
4085 				*(uint32 *)data = dhdpcie_bus_rtcm32(bus, address);
4086 			}
4087 #endif /* DHD_SUPPORT_64BIT */
4088 			else {
4089 				dsize = sizeof(uint8);
4090 				*data = dhdpcie_bus_rtcm8(bus, address);
4091 			}
4092 
4093 			/* Adjust for next transfer (if any) */
4094 			if ((size -= dsize) > 0) {
4095 				data += dsize;
4096 				address += dsize;
4097 			}
4098 		}
4099 	}
4100 	if (MULTIBP_ENAB(bus->sih)) {
4101 		dhd_bus_pcie_pwr_req_clear(bus);
4102 	}
4103 	return BCME_OK;
4104 } /* dhdpcie_bus_membytes */
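
/*
 * Editor's note: standalone sketch of the two tricks used above -- detecting
 * host endianness by aliasing an int through a char pointer, and widening the
 * transfer only when the address is naturally aligned (the 8-byte path mirrors
 * the DHD_SUPPORT_64BIT build). Names are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

static int is_little_endian(void)
{
	int probe = 1;
	return *(char *)&probe;		/* 1 on little-endian hosts */
}

static unsigned int pick_width(uintptr_t addr, size_t remaining)
{
	/* widest transfer that is both available and naturally aligned */
	if (remaining >= 8 && !(addr % 8) && is_little_endian())
		return 8;
	if (remaining >= 4 && !(addr % 4) && is_little_endian())
		return 4;
	return 1;			/* fall back to byte accesses */
}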
4105 
4106 /**
4107  * Transfers transmit (ethernet) packets that were queued in the (flow controlled) flow ring queue
4108  * to the (non flow controlled) flow ring.
4109  */
4110 int BCMFASTPATH
4111 dhd_bus_schedule_queue(struct dhd_bus  *bus, uint16 flow_id, bool txs)
4112 {
4113 	flow_ring_node_t *flow_ring_node;
4114 	int ret = BCME_OK;
4115 #ifdef DHD_LOSSLESS_ROAMING
4116 	dhd_pub_t *dhdp = bus->dhd;
4117 #endif // endif
4118 	DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
4119 
4120 	/* Range check on flow_id */
4121 	if (flow_id >= bus->max_submission_rings) {
4122 		DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__,
4123 			flow_id, bus->max_submission_rings));
4124 		return 0;
4125 	}
4126 
4127 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);
4128 
4129 	if (flow_ring_node->prot_info == NULL) {
4130 	    DHD_ERROR((" %s : invalid flow_ring_node \n", __FUNCTION__));
4131 	    return BCME_NOTREADY;
4132 	}
4133 
4134 #ifdef DHD_LOSSLESS_ROAMING
4135 	if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) {
4136 		DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n",
4137 			__FUNCTION__, flow_ring_node->flow_info.tid));
4138 		return BCME_OK;
4139 	}
4140 #endif /* DHD_LOSSLESS_ROAMING */
4141 
4142 	{
4143 		unsigned long flags;
4144 		void *txp = NULL;
4145 		flow_queue_t *queue;
4146 #ifdef DHD_LOSSLESS_ROAMING
4147 		struct ether_header *eh;
4148 		uint8 *pktdata;
4149 #endif /* DHD_LOSSLESS_ROAMING */
4150 
4151 		queue = &flow_ring_node->queue; /* queue associated with flow ring */
4152 
4153 		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
4154 
4155 		if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
4156 			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4157 			return BCME_NOTREADY;
4158 		}
4159 
4160 		while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
4161 			PKTORPHAN(txp);
4162 
4163 			/*
4164 			 * Modifying the packet length caused P2P cert failures.
4165 			 * Specifically on test cases where a packet of size 52 bytes
4166 			 * was injected, the sniffer capture showed 62 bytes because of
4167 			 * which the cert tests failed. So making the below change
4168 			 * only Router specific.
4169 			 */
4170 
4171 #ifdef DHDTCPACK_SUPPRESS
4172 			if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) {
4173 				ret = dhd_tcpack_check_xmit(bus->dhd, txp);
4174 				if (ret != BCME_OK) {
4175 					DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n",
4176 						__FUNCTION__));
4177 				}
4178 			}
4179 #endif /* DHDTCPACK_SUPPRESS */
4180 #ifdef DHD_LOSSLESS_ROAMING
4181 			pktdata = (uint8 *)PKTDATA(OSH_NULL, txp);
4182 			eh = (struct ether_header *) pktdata;
4183 			if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
4184 				uint8 prio = (uint8)PKTPRIO(txp);
4185 				/* Restore to original priority for 802.1X packet */
4186 				if (prio == PRIO_8021D_NC) {
4187 					PKTSETPRIO(txp, dhdp->prio_8021x);
4188 				}
4189 			}
4190 #endif /* DHD_LOSSLESS_ROAMING */
4191 			/* Attempt to transfer packet over flow ring */
4192 			ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
4193 			if (ret != BCME_OK) { /* may not have resources in flow ring */
4194 				DHD_INFO(("%s: Reinsert %d\n", __FUNCTION__, ret));
4195 				dhd_prot_txdata_write_flush(bus->dhd, flow_id);
4196 				/* reinsert at head */
4197 				dhd_flow_queue_reinsert(bus->dhd, queue, txp);
4198 				DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4199 
4200 				/* If we are able to requeue back, return success */
4201 				return BCME_OK;
4202 			}
4203 		}
4204 
4205 #ifdef DHD_HP2P
4206 		if (!flow_ring_node->hp2p_ring) {
4207 			dhd_prot_txdata_write_flush(bus->dhd, flow_id);
4208 		}
4209 #else
4210 		dhd_prot_txdata_write_flush(bus->dhd, flow_id);
4211 #endif // endif
4212 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4213 	}
4214 
4215 	return ret;
4216 } /* dhd_bus_schedule_queue */
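
/*
 * Editor's note: the dequeue loop above stops cleanly on ring backpressure by
 * re-inserting the failed packet at the head of the queue, which preserves
 * ordering. A standalone sketch over a trivial singly-linked queue (all names
 * hypothetical; dhd_flow_queue_* are the real counterparts):
 */
#include <stddef.h>

struct pkt { struct pkt *next; };
struct pkt_queue { struct pkt *head; };

static struct pkt *q_dequeue(struct pkt_queue *q)
{
	struct pkt *p = q->head;
	if (p)
		q->head = p->next;
	return p;
}

static void q_reinsert_head(struct pkt_queue *q, struct pkt *p)
{
	p->next = q->head;		/* failed packet returns to the front */
	q->head = p;
}

static void drain(struct pkt_queue *q, int (*xmit)(struct pkt *))
{
	struct pkt *p;

	while ((p = q_dequeue(q)) != NULL) {
		if (xmit(p) != 0) {	/* ring full: stop, keep ordering */
			q_reinsert_head(q, p);
			return;		/* packet safely requeued */
		}
	}
}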
4217 
4218 /** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */
4219 int BCMFASTPATH
4220 dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
4221 {
4222 	uint16 flowid;
4223 #ifdef IDLE_TX_FLOW_MGMT
4224 	uint8	node_status;
4225 #endif /* IDLE_TX_FLOW_MGMT */
4226 	flow_queue_t *queue;
4227 	flow_ring_node_t *flow_ring_node;
4228 	unsigned long flags;
4229 	int ret = BCME_OK;
4230 	void *txp_pend = NULL;
4231 
4232 	if (!bus->dhd->flowid_allocator) {
4233 		DHD_ERROR(("%s: Flow ring not initialized yet\n", __FUNCTION__));
4234 		goto toss;
4235 	}
4236 
4237 	flowid = DHD_PKT_GET_FLOWID(txp);
4238 
4239 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
4240 
4241 	DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
4242 		__FUNCTION__, flowid, flow_ring_node->status, flow_ring_node->active));
4243 
4244 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
4245 	if ((flowid >= bus->dhd->num_flow_rings) ||
4246 #ifdef IDLE_TX_FLOW_MGMT
4247 		(!flow_ring_node->active))
4248 #else
4249 		(!flow_ring_node->active) ||
4250 		(flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) ||
4251 		(flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING))
4252 #endif /* IDLE_TX_FLOW_MGMT */
4253 	{
4254 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4255 		DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
4256 			__FUNCTION__, flowid, flow_ring_node->status,
4257 			flow_ring_node->active));
4258 		ret = BCME_ERROR;
4259 		goto toss;
4260 	}
4261 
4262 #ifdef IDLE_TX_FLOW_MGMT
4263 	node_status = flow_ring_node->status;
4264 
4265 	/* handle the different status states here */
4266 	switch (node_status)
4267 	{
4268 		case FLOW_RING_STATUS_OPEN:
4269 
4270 			if (bus->enable_idle_flowring_mgmt) {
4271 				/* Move the node to the head of active list */
4272 				dhd_flow_ring_move_to_active_list_head(bus, flow_ring_node);
4273 			}
4274 			break;
4275 
4276 		case FLOW_RING_STATUS_SUSPENDED:
4277 			DHD_INFO(("Need to Initiate TX Flow resume\n"));
4278 			/* Issue resume_ring request */
4279 			dhd_bus_flow_ring_resume_request(bus,
4280 					flow_ring_node);
4281 			break;
4282 
4283 		case FLOW_RING_STATUS_CREATE_PENDING:
4284 		case FLOW_RING_STATUS_RESUME_PENDING:
4285 			/* Don't do anything here */
4286 			DHD_INFO(("Waiting for Flow create/resume! status is %u\n",
4287 				node_status));
4288 			break;
4289 
4290 		case FLOW_RING_STATUS_DELETE_PENDING:
4291 		default:
4292 			DHD_ERROR(("Dropping packet!! flowid %u status is %u\n",
4293 				flowid, node_status));
4294 			/* error here!! */
4295 			ret = BCME_ERROR;
4296 			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4297 			goto toss;
4298 	}
4299 	/* Now queue the packet */
4300 #endif /* IDLE_TX_FLOW_MGMT */
4301 
4302 	queue = &flow_ring_node->queue; /* queue associated with flow ring */
4303 
4304 	if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK)
4305 		txp_pend = txp;
4306 
4307 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4308 
4309 	if (flow_ring_node->status) {
4310 		DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
4311 		    __FUNCTION__, flowid, flow_ring_node->status,
4312 		    flow_ring_node->active));
4313 		if (txp_pend) {
4314 			txp = txp_pend;
4315 			goto toss;
4316 		}
4317 		return BCME_OK;
4318 	}
4319 	ret = dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
4320 
4321 	/* If we have anything pending, try to push into q */
4322 	if (txp_pend) {
4323 		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
4324 
4325 		if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
4326 			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4327 			txp = txp_pend;
4328 			goto toss;
4329 		}
4330 
4331 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4332 	}
4333 
4334 	return ret;
4335 
4336 toss:
4337 	DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
4338 	PKTCFREE(bus->dhd->osh, txp, TRUE);
4339 	return ret;
4340 } /* dhd_bus_txdata */
4341 
4342 void
4343 dhd_bus_stop_queue(struct dhd_bus *bus)
4344 {
4345 	dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
4346 }
4347 
4348 void
4349 dhd_bus_start_queue(struct dhd_bus *bus)
4350 {
4351 	/*
4352 	 * The Tx queue was stopped either because of a resource shortage
4353 	 * or because the bus is not in a state where it can be turned on.
4354 	 *
4355 	 * Note that the network interface is restarted only when enough
4356 	 * resources are available; the flag indicating that all resources
4357 	 * are available must be updated first.
4358 	 */
4359 	if (dhd_prot_check_tx_resource(bus->dhd)) {
4360 		DHD_ERROR(("%s: Interface NOT started, previously stopped "
4361 			"due to resource shortage\n", __FUNCTION__));
4362 		return;
4363 	}
4364 	dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
4365 }
4366 
4367 /* Device console input function */
4368 int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
4369 {
4370 	dhd_bus_t *bus = dhd->bus;
4371 	uint32 addr, val;
4372 	int rv;
4373 	/* Address could be zero if CONSOLE := 0 in dongle Makefile */
4374 	if (bus->console_addr == 0)
4375 		return BCME_UNSUPPORTED;
4376 
4377 	/* Don't allow input if dongle is in reset */
4378 	if (bus->dhd->dongle_reset) {
4379 		return BCME_NOTREADY;
4380 	}
4381 
4382 	/* Zero cbuf_index */
4383 	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
4384 	val = htol32(0);
4385 	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
4386 		goto done;
4387 
4388 	/* Write message into cbuf */
4389 	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
4390 	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
4391 		goto done;
4392 
4393 	/* Write length into vcons_in */
4394 	addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
4395 	val = htol32(msglen);
4396 	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
4397 		goto done;
4398 
4399 	/* generate an interrupt to dongle to indicate that it needs to process cons command */
4400 	dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);
4401 done:
4402 	return rv;
4403 } /* dhd_bus_console_in */
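
/*
 * Editor's note: the write ordering above matters -- the message body must be
 * in cbuf before vcons_in is set, because the nonzero length is what the
 * dongle acts on once the H2D_HOST_CONS_INT doorbell fires. A hedged sketch
 * with hypothetical wrappers (the struct below is illustrative only):
 */
struct cons_ops {
	int (*write32)(void *bus, unsigned long addr, unsigned int val);
	int (*write_bytes)(void *bus, unsigned long addr,
	                   const void *p, unsigned int n);
	void (*doorbell)(void *bus);		/* H2D_HOST_CONS_INT */
};

static int console_in_sketch(void *bus, const struct cons_ops *ops,
                             unsigned long cbuf_idx, unsigned long cbuf,
                             unsigned long vcons_in,
                             const char *msg, unsigned int len)
{
	if (ops->write32(bus, cbuf_idx, 0) < 0)		/* 1: reset index */
		return -1;
	if (ops->write_bytes(bus, cbuf, msg, len) < 0)	/* 2: message body */
		return -1;
	if (ops->write32(bus, vcons_in, len) < 0)	/* 3: publish length */
		return -1;
	ops->doorbell(bus);				/* 4: notify dongle */
	return 0;
}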
4404 
4405 /**
4406  * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is
4407  * contained in 'pkt'. Processes rx frame, forwards up the layer to netif.
4408  */
4409 void BCMFASTPATH
4410 dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count)
4411 {
4412 	dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
4413 }
4414 
4415 void
4416 dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr)
4417 {
4418 	dhdpcie_os_setbar1win(bus, addr);
4419 }
4420 
4421 /** 'offset' is a backplane address */
4422 void
4423 dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
4424 {
4425 	if (bus->is_linkdown) {
4426 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4427 		return;
4428 	} else {
4429 		dhdpcie_os_wtcm8(bus, offset, data);
4430 	}
4431 }
4432 
4433 uint8
4434 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
4435 {
4436 	volatile uint8 data;
4437 	if (bus->is_linkdown) {
4438 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4439 		data = (uint8)-1;
4440 	} else {
4441 		data = dhdpcie_os_rtcm8(bus, offset);
4442 	}
4443 	return data;
4444 }
4445 
4446 void
4447 dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
4448 {
4449 	if (bus->is_linkdown) {
4450 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4451 		return;
4452 	} else {
4453 		dhdpcie_os_wtcm32(bus, offset, data);
4454 	}
4455 }
4456 void
4457 dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
4458 {
4459 	if (bus->is_linkdown) {
4460 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4461 		return;
4462 	} else {
4463 		dhdpcie_os_wtcm16(bus, offset, data);
4464 	}
4465 }
4466 #ifdef DHD_SUPPORT_64BIT
4467 void
4468 dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
4469 {
4470 	if (bus->is_linkdown) {
4471 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4472 		return;
4473 	} else {
4474 		dhdpcie_os_wtcm64(bus, offset, data);
4475 	}
4476 }
4477 #endif /* DHD_SUPPORT_64BIT */
4478 
4479 uint16
4480 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
4481 {
4482 	volatile uint16 data;
4483 	if (bus->is_linkdown) {
4484 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4485 		data = (uint16)-1;
4486 	} else {
4487 		data = dhdpcie_os_rtcm16(bus, offset);
4488 	}
4489 	return data;
4490 }
4491 
4492 uint32
4493 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
4494 {
4495 	volatile uint32 data;
4496 	if (bus->is_linkdown) {
4497 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4498 		data = (uint32)-1;
4499 	} else {
4500 		data = dhdpcie_os_rtcm32(bus, offset);
4501 	}
4502 	return data;
4503 }
4504 
4505 #ifdef DHD_SUPPORT_64BIT
4506 uint64
4507 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
4508 {
4509 	volatile uint64 data;
4510 	if (bus->is_linkdown) {
4511 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4512 		data = (uint64)-1;
4513 	} else {
4514 		data = dhdpcie_os_rtcm64(bus, offset);
4515 	}
4516 	return data;
4517 }
4518 #endif /* DHD_SUPPORT_64BIT */
4519 
4520 /** A snippet of dongle memory is shared between host and dongle */
4521 void
4522 dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid)
4523 {
4524 	uint64 long_data;
4525 	ulong addr; /* dongle address */
4526 
4527 	DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len));
4528 
4529 	if (bus->is_linkdown) {
4530 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
4531 		return;
4532 	}
4533 
4534 	if (MULTIBP_ENAB(bus->sih)) {
4535 		dhd_bus_pcie_pwr_req(bus);
4536 	}
4537 	switch (type) {
4538 		case D2H_DMA_SCRATCH_BUF:
4539 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer);
4540 			long_data = HTOL64(*(uint64 *)data);
4541 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4542 			if (dhd_msg_level & DHD_INFO_VAL) {
4543 				prhex(__FUNCTION__, data, len);
4544 			}
4545 			break;
4546 
4547 		case D2H_DMA_SCRATCH_BUF_LEN :
4548 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer_len);
4549 			dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4550 			if (dhd_msg_level & DHD_INFO_VAL) {
4551 				prhex(__FUNCTION__, data, len);
4552 			}
4553 			break;
4554 
4555 		case H2D_DMA_INDX_WR_BUF:
4556 			long_data = HTOL64(*(uint64 *)data);
4557 			addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_w_idx_hostaddr);
4558 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4559 			if (dhd_msg_level & DHD_INFO_VAL) {
4560 				prhex(__FUNCTION__, data, len);
4561 			}
4562 			break;
4563 
4564 		case H2D_DMA_INDX_RD_BUF:
4565 			long_data = HTOL64(*(uint64 *)data);
4566 			addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_r_idx_hostaddr);
4567 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4568 			if (dhd_msg_level & DHD_INFO_VAL) {
4569 				prhex(__FUNCTION__, data, len);
4570 			}
4571 			break;
4572 
4573 		case D2H_DMA_INDX_WR_BUF:
4574 			long_data = HTOL64(*(uint64 *)data);
4575 			addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_w_idx_hostaddr);
4576 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4577 			if (dhd_msg_level & DHD_INFO_VAL) {
4578 				prhex(__FUNCTION__, data, len);
4579 			}
4580 			break;
4581 
4582 		case D2H_DMA_INDX_RD_BUF:
4583 			long_data = HTOL64(*(uint64 *)data);
4584 			addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_r_idx_hostaddr);
4585 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4586 			if (dhd_msg_level & DHD_INFO_VAL) {
4587 				prhex(__FUNCTION__, data, len);
4588 			}
4589 			break;
4590 
4591 		case H2D_IFRM_INDX_WR_BUF:
4592 			long_data = HTOL64(*(uint64 *)data);
4593 			addr = DHD_RING_INFO_MEMBER_ADDR(bus, ifrm_w_idx_hostaddr);
4594 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4595 			if (dhd_msg_level & DHD_INFO_VAL) {
4596 				prhex(__FUNCTION__, data, len);
4597 			}
4598 			break;
4599 
4600 		case RING_ITEM_LEN :
4601 			addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, len_items);
4602 			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4603 			break;
4604 
4605 		case RING_MAX_ITEMS :
4606 			addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, max_item);
4607 			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4608 			break;
4609 
4610 		case RING_BUF_ADDR :
4611 			long_data = HTOL64(*(uint64 *)data);
4612 			addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, base_addr);
4613 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
4614 			if (dhd_msg_level & DHD_INFO_VAL) {
4615 				prhex(__FUNCTION__, data, len);
4616 			}
4617 			break;
4618 
4619 		case RING_WR_UPD :
4620 			addr = bus->ring_sh[ringid].ring_state_w;
4621 			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4622 			break;
4623 
4624 		case RING_RD_UPD :
4625 			addr = bus->ring_sh[ringid].ring_state_r;
4626 			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4627 			break;
4628 
4629 		case D2H_MB_DATA:
4630 			addr = bus->d2h_mb_data_ptr_addr;
4631 			dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4632 			break;
4633 
4634 		case H2D_MB_DATA:
4635 			addr = bus->h2d_mb_data_ptr_addr;
4636 			dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4637 			break;
4638 
4639 		case HOST_API_VERSION:
4640 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_cap);
4641 			dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4642 			break;
4643 
4644 		case DNGL_TO_HOST_TRAP_ADDR:
4645 			long_data = HTOL64(*(uint64 *)data);
4646 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_trap_addr);
4647 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
4648 			DHD_INFO(("Wrote trap addr:0x%x\n", (uint32) HTOL32(*(uint32 *)data)));
4649 			break;
4650 
4651 		case HOST_SCB_ADDR:
4652 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_addr);
4653 #ifdef DHD_SUPPORT_64BIT
4654 			dhdpcie_bus_wtcm64(bus, addr, (uint64) HTOL64(*(uint64 *)data));
4655 #else /* !DHD_SUPPORT_64BIT */
4656 			dhdpcie_bus_wtcm32(bus, addr, *((uint32*)data));
4657 #endif /* DHD_SUPPORT_64BIT */
4658 			DHD_INFO(("Wrote host_scb_addr:0x%x\n",
4659 				(uint32) HTOL32(*(uint32 *)data)));
4660 			break;
4661 
4662 		default:
4663 			break;
4664 	}
4665 	if (MULTIBP_ENAB(bus->sih)) {
4666 		dhd_bus_pcie_pwr_req_clear(bus);
4667 	}
4668 } /* dhd_bus_cmn_writeshared */
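
/*
 * Editor's note: every multi-byte value above goes through HTOL16/32/64
 * before the write, because the dongle side is little-endian regardless of
 * the host. A standalone illustration of that normalization for a 32-bit
 * field (hypothetical name; the real macros live in bcmendian.h):
 */
#include <stdint.h>
#include <string.h>

static uint32_t htol32_sketch(uint32_t v)
{
	uint8_t b[4] = {
		(uint8_t)(v), (uint8_t)(v >> 8),
		(uint8_t)(v >> 16), (uint8_t)(v >> 24),
	};				/* byte 0 = LSB: little-endian order */
	uint32_t out;

	memcpy(&out, b, sizeof(out));
	return out;	/* identity on LE hosts, a byte swap on BE hosts */
}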
4669 
4670 /** A snippet of dongle memory is shared between host and dongle */
4671 void
4672 dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
4673 {
4674 	ulong addr; /* dongle address */
4675 
4676 	if (MULTIBP_ENAB(bus->sih)) {
4677 		dhd_bus_pcie_pwr_req(bus);
4678 	}
4679 	switch (type) {
4680 		case RING_WR_UPD :
4681 			addr = bus->ring_sh[ringid].ring_state_w;
4682 			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4683 			break;
4684 
4685 		case RING_RD_UPD :
4686 			addr = bus->ring_sh[ringid].ring_state_r;
4687 			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4688 			break;
4689 
4690 		case TOTAL_LFRAG_PACKET_CNT :
4691 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, total_lfrag_pkt_cnt);
4692 			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4693 			break;
4694 
4695 		case H2D_MB_DATA:
4696 			addr = bus->h2d_mb_data_ptr_addr;
4697 			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
4698 			break;
4699 
4700 		case D2H_MB_DATA:
4701 			addr = bus->d2h_mb_data_ptr_addr;
4702 			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
4703 			break;
4704 
4705 		case MAX_HOST_RXBUFS :
4706 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, max_host_rxbufs);
4707 			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4708 			break;
4709 
4710 		case HOST_SCB_ADDR:
4711 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_size);
4712 			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
4713 			break;
4714 
4715 		default :
4716 			break;
4717 	}
4718 	if (MULTIBP_ENAB(bus->sih)) {
4719 		dhd_bus_pcie_pwr_req_clear(bus);
4720 	}
4721 }
4722 
4723 uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus)
4724 {
4725 	return ((pciedev_shared_t*)bus->pcie_sh)->flags;
4726 }
4727 
4728 void
4729 dhd_bus_clearcounts(dhd_pub_t *dhdp)
4730 {
4731 }
4732 
4733 /**
4734  * @param params    input buffer, NULL for 'set' operation.
4735  * @param plen      length of 'params' buffer, 0 for 'set' operation.
4736  * @param arg       output buffer
4737  */
4738 int
4739 dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
4740                  void *params, int plen, void *arg, int len, bool set)
4741 {
4742 	dhd_bus_t *bus = dhdp->bus;
4743 	const bcm_iovar_t *vi = NULL;
4744 	int bcmerror = BCME_UNSUPPORTED;
4745 	int val_size;
4746 	uint32 actionid;
4747 
4748 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4749 
4750 	ASSERT(name);
4751 	ASSERT(len >= 0);
4752 	if (!name || len < 0)
4753 		return BCME_BADARG;
4754 
4755 	/* Get MUST have return space */
4756 	ASSERT(set || (arg && len));
4757 	if (!(set || (arg && len)))
4758 		return BCME_BADARG;
4759 
4760 	/* Set does NOT take qualifiers */
4761 	ASSERT(!set || (!params && !plen));
4762 	if (!(!set || (!params && !plen)))
4763 		return BCME_BADARG;
4764 
4765 	DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
4766 	         name, (set ? "set" : "get"), len, plen));
4767 
4768 	/* Look up var locally; if not found pass to host driver */
4769 	if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
4770 		goto exit;
4771 	}
4772 
4773 	if (MULTIBP_ENAB(bus->sih)) {
4774 		if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) {
4775 			DHD_ERROR(("%s: Bypass pwr request\n", __FUNCTION__));
4776 		} else {
4777 			dhd_bus_pcie_pwr_req(bus);
4778 		}
4779 	}
4780 
4781 	/* set up 'params' pointer in case this is a set command so that
4782 	 * the convenience int and bool code can be common to set and get
4783 	 */
4784 	if (params == NULL) {
4785 		params = arg;
4786 		plen = len;
4787 	}
4788 
4789 	if (vi->type == IOVT_VOID)
4790 		val_size = 0;
4791 	else if (vi->type == IOVT_BUFFER)
4792 		val_size = len;
4793 	else
4794 		/* all other types are integer sized */
4795 		val_size = sizeof(int);
4796 
4797 	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
4798 	bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
4799 
4800 exit:
4801 	/* DEVRESET_QUIESCE/DEVRESET_ON can include a dongle re-attach, which
4802 	 * re-initializes the pwr_req_ref count to 0; a later power request
4803 	 * clear would then see a mismatched count and hang.
4804 	 * In that case, bypass the power request clear.
4805 	 */
4806 	if (bcmerror == BCME_DNGL_DEVRESET) {
4807 		bcmerror = BCME_OK;
4808 	} else {
4809 		if (MULTIBP_ENAB(bus->sih)) {
4810 			if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) {
4811 				DHD_ERROR(("%s: Bypass pwr request clear\n", __FUNCTION__));
4812 			} else {
4813 				dhd_bus_pcie_pwr_req_clear(bus);
4814 			}
4815 		}
4816 	}
4817 	return bcmerror;
4818 } /* dhd_bus_iovar_op */
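
/*
 * Editor's note: hedged sketch of the dispatch pattern above -- look the
 * iovar name up in a table, derive the value size from its declared type,
 * then fold get/set into one handler via an action flag. Types and names are
 * simplified stand-ins for bcm_iovar_t / bcm_iovar_lookup / IOV_GVAL/IOV_SVAL.
 */
#include <stddef.h>
#include <string.h>

enum iov_type { IOVT_VOID, IOVT_BUFFER, IOVT_INT };

struct iovar {
	const char *name;
	enum iov_type type;
	int (*handler)(void *bus, int set, void *arg, size_t val_size);
};

static int iovar_op(const struct iovar *tbl, size_t n, void *bus,
                    const char *name, int set, void *arg, size_t len)
{
	size_t i, val_size;

	for (i = 0; i < n; i++) {
		if (strcmp(tbl[i].name, name) != 0)
			continue;
		/* VOID takes no value, BUFFER uses the caller's length,
		 * everything else is integer sized -- as in the code above.
		 */
		val_size = (tbl[i].type == IOVT_VOID) ? 0 :
		           (tbl[i].type == IOVT_BUFFER) ? len : sizeof(int);
		return tbl[i].handler(bus, set, arg, val_size);
	}
	return -1;	/* not found: fall through to the host driver */
}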
4819 
4820 #ifdef BCM_BUZZZ
4821 #include <bcm_buzzz.h>
4822 
4823 int
4824 dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log,
4825 	const int num_counters)
4826 {
4827 	int bytes = 0;
4828 	uint32 ctr;
4829 	uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX];
4830 	uint32 delta[BCM_BUZZZ_COUNTERS_MAX];
4831 
4832 	/* Compute elapsed counter values per counter event type */
4833 	for (ctr = 0U; ctr < num_counters; ctr++) {
4834 		prev[ctr] = core[ctr];
4835 		curr[ctr] = *log++;
4836 		core[ctr] = curr[ctr];  /* saved for next log */
4837 
4838 		if (curr[ctr] < prev[ctr])
4839 			delta[ctr] = curr[ctr] + (~0U - prev[ctr]);
4840 		else
4841 			delta[ctr] = (curr[ctr] - prev[ctr]);
4842 
4843 		bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
4844 	}
4845 
4846 	return bytes;
4847 }
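
/*
 * Editor's note: on wrap the code above computes curr + (~0U - prev), which
 * undercounts by one tick versus the true modulo-2^32 elapsed count (e.g.
 * prev = 0xFFFFFFFE, curr = 1 is 3 ticks, but the guarded form yields 2).
 * Plain unsigned subtraction handles both cases exactly:
 */
#include <stdint.h>

static uint32_t ctr_delta(uint32_t prev, uint32_t curr)
{
	/* Unsigned arithmetic is modular, so this is (curr - prev) mod 2^32
	 * and therefore correct even when the counter wrapped (curr < prev).
	 */
	return curr - prev;
}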
4848 
4849 typedef union cm3_cnts { /* export this in bcm_buzzz.h */
4850 	uint32 u32;
4851 	uint8  u8[4];
4852 	struct {
4853 		uint8 cpicnt;
4854 		uint8 exccnt;
4855 		uint8 sleepcnt;
4856 		uint8 lsucnt;
4857 	};
4858 } cm3_cnts_t;
4859 
4860 int
4861 dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log)
4862 {
4863 	int bytes = 0;
4864 
4865 	uint32 cyccnt, instrcnt;
4866 	cm3_cnts_t cm3_cnts;
4867 	uint8 foldcnt;
4868 
4869 	{   /* 32bit cyccnt */
4870 		uint32 curr, prev, delta;
4871 		prev = core[0]; curr = *log++; core[0] = curr;
4872 		if (curr < prev)
4873 			delta = curr + (~0U - prev);
4874 		else
4875 			delta = (curr - prev);
4876 
4877 		bytes += sprintf(p + bytes, "%12u ", delta);
4878 		cyccnt = delta;
4879 	}
4880 
4881 	{	/* Extract the 4 cnts: cpi, exc, sleep and lsu */
4882 		int i;
4883 		uint8 max8 = ~0;
4884 		cm3_cnts_t curr, prev, delta;
4885 		prev.u32 = core[1]; curr.u32 = * log++; core[1] = curr.u32;
4886 		for (i = 0; i < 4; i++) {
4887 			if (curr.u8[i] < prev.u8[i])
4888 				delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
4889 			else
4890 				delta.u8[i] = (curr.u8[i] - prev.u8[i]);
4891 			bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
4892 		}
4893 		cm3_cnts.u32 = delta.u32;
4894 	}
4895 
4896 	{   /* Extract the foldcnt from arg0 */
4897 		uint8 curr, prev, delta, max8 = ~0;
4898 		bcm_buzzz_arg0_t arg0; arg0.u32 = *log;
4899 		prev = core[2]; curr = arg0.klog.cnt; core[2] = curr;
4900 		if (curr < prev)
4901 			delta = curr + (max8 - prev);
4902 		else
4903 			delta = (curr - prev);
4904 		bytes += sprintf(p + bytes, "%4u ", delta);
4905 		foldcnt = delta;
4906 	}
4907 
4908 	instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2]
4909 		                 + cm3_cnts.u8[3]) + foldcnt;
4910 	if (instrcnt > 0xFFFFFF00)
4911 		bytes += sprintf(p + bytes, "[%10s] ", "~");
4912 	else
4913 		bytes += sprintf(p + bytes, "[%10u] ", instrcnt);
4914 	return bytes;
4915 }
4916 
4917 int
4918 dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz)
4919 {
4920 	int bytes = 0;
4921 	bcm_buzzz_arg0_t arg0;
4922 	static uint8 * fmt[] = BCM_BUZZZ_FMT_STRINGS;
4923 
4924 	if (buzzz->counters == 6) {
4925 		bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log);
4926 		log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
4927 	} else {
4928 		bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters);
4929 		log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */
4930 	}
4931 
4932 	/* Dump the logged arguments using the registered formats */
4933 	arg0.u32 = *log++;
4934 
4935 	switch (arg0.klog.args) {
4936 		case 0:
4937 			bytes += sprintf(p + bytes, "%s", fmt[arg0.klog.id]);
4938 			break;
4939 		case 1:
4940 		{
4941 			uint32 arg1 = *log++;
4942 			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
4943 			break;
4944 		}
4945 		case 2:
4946 		{
4947 			uint32 arg1, arg2;
4948 			arg1 = *log++; arg2 = *log++;
4949 			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2);
4950 			break;
4951 		}
4952 		case 3:
4953 		{
4954 			uint32 arg1, arg2, arg3;
4955 			arg1 = *log++; arg2 = *log++; arg3 = *log++;
4956 			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3);
4957 			break;
4958 		}
4959 		case 4:
4960 		{
4961 			uint32 arg1, arg2, arg3, arg4;
4962 			arg1 = *log++; arg2 = *log++;
4963 			arg3 = *log++; arg4 = *log++;
4964 			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4);
4965 			break;
4966 		}
4967 		default:
4968 			printf("Maximum of 4 arguments supported\n");
4969 			break;
4970 	}
4971 
4972 	bytes += sprintf(p + bytes, "\n");
4973 
4974 	return bytes;
4975 }
4976 
4977 void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p)
4978 {
4979 	int i;
4980 	uint32 total, part1, part2, log_sz, core[BCM_BUZZZ_COUNTERS_MAX];
4981 	void * log;
4982 
4983 	for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) {
4984 		core[i] = 0;
4985 	}
4986 
4987 	log_sz = buzzz_p->log_sz;
4988 
4989 	part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz;
4990 
4991 	if (buzzz_p->wrap == TRUE) {
4992 		part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
4993 		total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz;
4994 	} else {
4995 		part2 = 0U;
4996 		total = buzzz_p->count;
4997 	}
4998 
4999 	if (total == 0U) {
5000 		printf("bcm_buzzz_dump total<%u> done\n", total);
5001 		return;
5002 	} else {
5003 		printf("bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n",
5004 		       total, part2, part1);
5005 	}
5006 
5007 	if (part2) {   /* with wrap */
5008 		log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log));
5009 		while (part2--) {   /* from cur to end : part2 */
5010 			p[0] = '\0';
5011 			dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
5012 			printf("%s", p);
5013 			log = (void*)((size_t)log + buzzz_p->log_sz);
5014 		}
5015 	}
5016 
5017 	log = (void*)buffer_p;
5018 	while (part1--) {
5019 		p[0] = '\0';
5020 		dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
5021 		printf("%s", p);
5022 		log = (void*)((size_t)log + buzzz_p->log_sz);
5023 	}
5024 
5025 	printf("bcm_buzzz_dump done.\n");
5026 }
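/*
 * Ordering sketch for the loops above: once the trace has wrapped, the oldest
 * entries live between 'cur' and 'end' (part2) and are printed first; the
 * newer entries from the buffer start up to 'cur' (part1) follow. For example
 * (illustrative numbers, not from a real dongle), with buffer_sz = 4096 and
 * log_sz = 16, a wrapped buffer yields
 * total = (4096 - BCM_BUZZZ_LOGENTRY_MAXSZ) / 16 entries.
 */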
5027 
5028 int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
5029 {
5030 	bcm_buzzz_t * buzzz_p = NULL;
5031 	void * buffer_p = NULL;
5032 	char * page_p = NULL;
5033 	pciedev_shared_t *sh;
5034 	int ret = 0;
5035 
5036 	if (bus->dhd->busstate != DHD_BUS_DATA) {
5037 		return BCME_UNSUPPORTED;
5038 	}
5039 	if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) {
5040 		printf("Page memory allocation failure\n");
5041 		goto done;
5042 	}
5043 	if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) {
5044 		printf("BCM BUZZZ memory allocation failure\n");
5045 		goto done;
5046 	}
5047 
5048 	ret = dhdpcie_readshared(bus);
5049 	if (ret < 0) {
5050 		DHD_ERROR(("%s: Shared area read failed\n", __FUNCTION__));
5051 		goto done;
5052 	}
5053 
5054 	sh = bus->pcie_sh;
5055 
5056 	DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzz_dbg_ptr));
5057 
5058 	if (sh->buzz_dbg_ptr != 0U) {	/* Fetch and display dongle BUZZZ Trace */
5059 
5060 		dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzz_dbg_ptr,
5061 		                     (uint8 *)buzzz_p, sizeof(bcm_buzzz_t));
5062 
5063 		printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> "
5064 			"count<%u> status<%u> wrap<%u>\n"
5065 			"cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n",
5066 			(int)sh->buzz_dbg_ptr,
5067 			(int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end,
5068 			buzzz_p->count, buzzz_p->status, buzzz_p->wrap,
5069 			buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group,
5070 			buzzz_p->buffer_sz, buzzz_p->log_sz);
5071 
5072 		if (buzzz_p->count == 0) {
5073 			printf("Empty dongle BUZZZ trace\n\n");
5074 			goto done;
5075 		}
5076 
5077 		/* Allocate memory for trace buffer and format strings */
5078 		buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
5079 		if (buffer_p == NULL) {
5080 			printf("Buffer memory allocation failure\n");
5081 			goto done;
5082 		}
5083 
5084 		/* Fetch the trace. format strings are exported via bcm_buzzz.h */
5085 		dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log,   /* Trace */
5086 		                     (uint8 *)buffer_p, buzzz_p->buffer_sz);
5087 
5088 		/* Process and display the trace using formatted output */
5089 
5090 		{
5091 			int ctr;
5092 			for (ctr = 0; ctr < buzzz_p->counters; ctr++) {
5093 				printf("<Evt[%02X]> ", buzzz_p->eventid[ctr]);
5094 			}
5095 			printf("<code execution point>\n");
5096 		}
5097 
5098 		dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
5099 
5100 		printf("----- End of dongle BCM BUZZZ Trace -----\n\n");
5101 
5102 		MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
5103 	}
5104 
5105 done:
5106 
5107 	if (page_p)   MFREE(bus->dhd->osh, page_p, 4096);
5108 	if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); /* before buzzz_p: size lives there */
5109 	if (buzzz_p)  MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t));
5110 
5111 	return BCME_OK;
5112 }
5113 #endif /* BCM_BUZZZ */
5114 
5115 #define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) &&	\
5116 	((sih)->buscoretype == PCIE2_CORE_ID))
5117 #ifdef DHD_PCIE_REG_ACCESS
5118 static bool
5119 pcie2_mdiosetblock(dhd_bus_t *bus, uint blk)
5120 {
5121 	uint mdiodata, mdioctrl, i = 0;
5122 	uint pcie_serdes_spinwait = 200;
5123 
5124 	mdioctrl = MDIOCTL2_DIVISOR_VAL | (0x1F << MDIOCTL2_REGADDR_SHF);
5125 	mdiodata = (blk << MDIODATA2_DEVADDR_SHF) | MDIODATA2_DONE;
5126 
5127 	si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdioctrl);
5128 	si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0, mdiodata);
5129 
5130 	OSL_DELAY(10);
5131 	/* retry till the transaction is complete */
5132 	while (i < pcie_serdes_spinwait) {
5133 		uint mdioctrl_read = si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA,
5134 			0, 0);
5135 		if (!(mdioctrl_read & MDIODATA2_DONE)) {
5136 			break;
5137 		}
5138 		OSL_DELAY(1000);
5139 		i++;
5140 	}
5141 
5142 	if (i >= pcie_serdes_spinwait) {
5143 		DHD_ERROR(("pcie2_mdiosetblock: timed out\n"));
5144 		return FALSE;
5145 	}
5146 
5147 	return TRUE;
5148 }
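/*
 * MDIO completion polling pattern used above (and in pcie2_mdioop() further
 * below): a transaction is complete once MDIODATA2_DONE reads back cleared.
 * With pcie_serdes_spinwait = 200 iterations of OSL_DELAY(1000), the wait
 * budget is roughly 200 ms before the operation is declared timed out.
 */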
5149 #endif /* DHD_PCIE_REG_ACCESS */
5150 
5151 #define PCIE_FLR_CAPAB_BIT		28
5152 #define PCIE_FUNCTION_LEVEL_RESET_BIT	15
5153 
5154 /* Change delays only for QT HW; FPGA and silicon use the same delay */
5155 #ifdef BCMQT_HW
5156 #define DHD_FUNCTION_LEVEL_RESET_DELAY		300000u
5157 #define DHD_SSRESET_STATUS_RETRY_DELAY	10000u
5158 #else
5159 #define DHD_FUNCTION_LEVEL_RESET_DELAY	70u	/* 70 msec delay */
5160 #define DHD_SSRESET_STATUS_RETRY_DELAY	40u
5161 #endif /* BCMQT_HW */
5162 /*
5163  * Increase the SSReset de-assert time to 8 ms,
5164  * since the re-scan takes longer on 4378B0.
5165  */
5166 #define DHD_SSRESET_STATUS_RETRIES	200u
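/*
 * Resulting poll budget: on silicon/FPGA, 200 retries x 40 us = 8000 us = 8 ms
 * (matching the 8 ms de-assert note above); on QT HW, 200 x 10000 us = 2 s.
 */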
5167 
5168 static void
5169 dhdpcie_enum_reg_init(dhd_bus_t *bus)
5170 {
5171 	/* initialize Function control register (clear bit 4) to HW init value */
5172 	si_corereg(bus->sih, bus->sih->buscoreidx,
5173 		OFFSETOF(sbpcieregs_t, ftn_ctrl.control), ~0,
5174 		PCIE_CPLCA_ENABLE | PCIE_DLY_PERST_TO_COE);
5175 
5176 	/* clear IntMask */
5177 	si_corereg(bus->sih, bus->sih->buscoreidx,
5178 		OFFSETOF(sbpcieregs_t, ftn_ctrl.intmask), ~0, 0);
5179 	/* clear IntStatus */
5180 	si_corereg(bus->sih, bus->sih->buscoreidx,
5181 		OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), ~0,
5182 		si_corereg(bus->sih, bus->sih->buscoreidx,
5183 			OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), 0, 0));
5184 
5185 	/* clear MSIVector */
5186 	si_corereg(bus->sih, bus->sih->buscoreidx,
5187 		OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_vector), ~0, 0);
5188 	/* clear MSIIntMask */
5189 	si_corereg(bus->sih, bus->sih->buscoreidx,
5190 		OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intmask), ~0, 0);
5191 	/* clear MSIIntStatus */
5192 	si_corereg(bus->sih, bus->sih->buscoreidx,
5193 		OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), ~0,
5194 		si_corereg(bus->sih, bus->sih->buscoreidx,
5195 			OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), 0, 0));
5196 
5197 	/* clear PowerIntMask */
5198 	si_corereg(bus->sih, bus->sih->buscoreidx,
5199 		OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intmask), ~0, 0);
5200 	/* clear PowerIntStatus */
5201 	si_corereg(bus->sih, bus->sih->buscoreidx,
5202 		OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), ~0,
5203 		si_corereg(bus->sih, bus->sih->buscoreidx,
5204 			OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), 0, 0));
5205 
5206 	/* clear MailboxIntMask */
5207 	si_corereg(bus->sih, bus->sih->buscoreidx,
5208 		OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intmask), ~0, 0);
5209 	/* clear MailboxInt */
5210 	si_corereg(bus->sih, bus->sih->buscoreidx,
5211 		OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), ~0,
5212 		si_corereg(bus->sih, bus->sih->buscoreidx,
5213 			OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), 0, 0));
5214 }
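/*
 * The *_intstatus writes above use the write-1-to-clear (W1C) idiom: the
 * register is first read with si_corereg(..., 0, 0) (mask 0 performs a plain
 * read), and the value read is then written back with mask ~0, clearing
 * exactly the status bits that were set. A minimal sketch of the same pattern:
 *
 *   uint32 st = si_corereg(sih, idx, REG, 0, 0);  // read current status
 *   si_corereg(sih, idx, REG, ~0, st);            // W1C: clear what was set
 */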
5215 
5216 int
5217 dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail)
5218 {
5219 	uint flr_capab;
5220 	uint val;
5221 	int retry = 0;
5222 
5223 	DHD_ERROR(("******** Perform FLR ********\n"));
5224 
5225 	if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
5226 		if (bus->pcie_mailbox_mask != 0) {
5227 			dhdpcie_bus_intr_disable(bus);
5228 		}
5229 		/* initialize F0 enum registers before FLR for rev66/67 */
5230 		dhdpcie_enum_reg_init(bus);
5231 	}
5232 
5233 	/* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */
5234 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CAPABILITY, sizeof(val));
5235 	flr_capab =  val & (1 << PCIE_FLR_CAPAB_BIT);
5236 	DHD_INFO(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n",
5237 		PCIE_CFG_DEVICE_CAPABILITY, val, flr_capab));
5238 	if (!flr_capab) {
5239 		DHD_ERROR(("Chip does not support FLR\n"));
5240 		return BCME_UNSUPPORTED;
5241 	}
5242 	/* WAR: Disable FLR reset for the H2 chip so that a legacy reset is performed */
5243 	else if ((bus->sih->chip == CYW55560_CHIP_ID) || (bus->sih->chip == BCM4375_CHIP_ID)) {
5244 		DHD_INFO(("H2/4375 chip: returning unsupported\n"));
5245 		return BCME_UNSUPPORTED;
5246 	}
5247 
5248 	/* Save pcie config space */
5249 	DHD_INFO(("Save Pcie Config Space\n"));
5250 	DHD_PCIE_CONFIG_SAVE(bus);
5251 
5252 	/* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */
5253 	DHD_INFO(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
5254 		PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
5255 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
5256 	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
5257 	val = val | (1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
5258 	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
5259 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
5260 
5261 	/* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */
5262 	DHD_INFO(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY));
5263 	OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY * 1000u);
5264 
5265 	if (force_fail) {
5266 		DHD_ERROR(("Set PCIE_SSRESET_DISABLE_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n",
5267 			PCIE_SSRESET_DISABLE_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
5268 		val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
5269 		DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
5270 			val));
5271 		val = val | (1 << PCIE_SSRESET_DISABLE_BIT);
5272 		DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
5273 			val));
5274 		OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val);
5275 
5276 		val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
5277 		DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
5278 			val));
5279 	}
5280 
5281 	/* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */
5282 	DHD_INFO(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
5283 		PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
5284 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
5285 	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
5286 	val = val & ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
5287 	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
5288 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
5289 
5290 	/* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
5291 	DHD_INFO(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)"
5292 		" is cleared\n", PCIE_SSRESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
5293 	do {
5294 		val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
5295 		DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
5296 			PCIE_CFG_SUBSYSTEM_CONTROL, val));
5297 		val = val & (1 << PCIE_SSRESET_STATUS_BIT);
5298 		OSL_DELAY(DHD_SSRESET_STATUS_RETRY_DELAY);
5299 	} while (val && (retry++ < DHD_SSRESET_STATUS_RETRIES));
5300 
5301 	if (val) {
5302 		DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5303 			PCIE_CFG_SUBSYSTEM_CONTROL, PCIE_SSRESET_STATUS_BIT));
5304 		/* User has to fire the IOVAR again, if force_fail is needed */
5305 		if (force_fail) {
5306 			bus->flr_force_fail = FALSE;
5307 			DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__));
5308 		}
5309 		return BCME_DONGLE_DOWN;
5310 	}
5311 
5312 	/* Restore pcie config space */
5313 	DHD_INFO(("Restore Pcie Config Space\n"));
5314 	DHD_PCIE_CONFIG_RESTORE(bus);
5315 
5316 	DHD_ERROR(("******** FLR Succeeded ********\n"));
5317 
5318 	return BCME_OK;
5319 }
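/*
 * FLR sequence summary (as implemented above): check the FLR capability bit
 * (bit 28 of PCIE_CFG_DEVICE_CAPABILITY), save PCIe config space, set then
 * clear the FLR bit (bit 15 of PCIE_CFG_DEVICE_CONTROL) with a
 * DHD_FUNCTION_LEVEL_RESET_DELAY in between, poll until
 * PCIE_SSRESET_STATUS_BIT clears, then restore config space. Typical
 * invocation, as done by the devreset IOVAR handler further below:
 *
 *   bcmerror = dhd_bus_perform_flr(bus, bus->flr_force_fail);
 */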
5320 
5321 #ifdef DHD_USE_BP_RESET
5322 #define DHD_BP_RESET_ASPM_DISABLE_DELAY	500u	/* usec */
5323 
5324 #define DHD_BP_RESET_STATUS_RETRY_DELAY	40u	/* usec */
5325 #define DHD_BP_RESET_STATUS_RETRIES	50u
5326 
5327 #define PCIE_CFG_SPROM_CTRL_SB_RESET_BIT	10
5328 #define PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT	21
5329 int
5330 dhd_bus_perform_bp_reset(struct dhd_bus *bus)
5331 {
5332 	uint val;
5333 	int retry = 0;
5334 	uint dar_clk_ctrl_status_reg = DAR_CLK_CTRL(bus->sih->buscorerev);
5335 	int ret = BCME_OK;
5336 	bool cond;
5337 
5338 	DHD_ERROR(("******** Perform BP reset ********\n"));
5339 
5340 	/* Disable ASPM */
5341 	DHD_INFO(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
5342 		PCIECFGREG_LINK_STATUS_CTRL));
5343 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
5344 	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5345 	val = val & (~PCIE_ASPM_ENAB);
5346 	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5347 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
5348 
5349 	/* wait for delay usec */
5350 	DHD_INFO(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY));
5351 	OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY);
5352 
5353 	/* Set bit 10 of PCIECFGREG_SPROM_CTRL */
5354 	DHD_INFO(("Set PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of PCIECFGREG_SPROM_CTRL(0x%x)\n",
5355 		PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
5356 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
5357 	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
5358 	val = val | (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
5359 	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
5360 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val), val);
5361 
5362 	/* Wait till the backplane reset is ASSERTED, i.e.
5363 	 * bit 10 of PCIECFGREG_SPROM_CTRL is cleared.
5364 	 * Only after this is polling bit 21 of DAR reg 0xAE0 valid;
5365 	 * otherwise the DAR register will return its previous, stale value.
5366 	 */
5367 	DHD_INFO(("Wait till PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of "
5368 		"PCIECFGREG_SPROM_CTRL(0x%x) is cleared\n",
5369 		PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
5370 	do {
5371 		val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
5372 		DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
5373 		cond = val & (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
5374 		OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
5375 	} while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
5376 
5377 	if (cond) {
5378 		DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5379 			PCIECFGREG_SPROM_CTRL, PCIE_CFG_SPROM_CTRL_SB_RESET_BIT));
5380 		ret = BCME_ERROR;
5381 		goto aspm_enab;
5382 	}
5383 
5384 	/* Wait till bit 21 of dar_clk_ctrl_status_reg is cleared */
5385 	DHD_INFO(("Wait till PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT(%d) of "
5386 		"dar_clk_ctrl_status_reg(0x%x) is cleared\n",
5387 		PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT, dar_clk_ctrl_status_reg));
5388 	do {
5389 		val = si_corereg(bus->sih, bus->sih->buscoreidx,
5390 			dar_clk_ctrl_status_reg, 0, 0);
5391 		DHD_INFO(("read_dar si_corereg: reg=0x%x read val=0x%x\n",
5392 			dar_clk_ctrl_status_reg, val));
5393 		cond = val & (1 << PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT);
5394 		OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
5395 	} while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
5396 
5397 	if (cond) {
5398 		DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5399 			dar_clk_ctrl_status_reg, PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT));
5400 		ret = BCME_ERROR;
5401 	}
5402 
5403 aspm_enab:
5404 	/* Enable ASPM */
5405 	DHD_INFO(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
5406 		PCIECFGREG_LINK_STATUS_CTRL));
5407 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
5408 	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5409 	val = val | (PCIE_ASPM_L1_ENAB);
5410 	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5411 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
5412 
5413 	DHD_ERROR(("******** BP reset Succeeded ********\n"));
5414 
5415 	return ret;
5416 }
5417 #endif /* DHD_USE_BP_RESET */
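/*
 * Note: dhd_bus_perform_bp_reset() (available only when DHD_USE_BP_RESET is
 * defined) resets just the chip backplane through the SPROM control register
 * while the PCIe link stays up (ASPM is temporarily disabled around the
 * reset), whereas dhd_bus_perform_flr() above resets the whole PCIe function
 * through config space.
 */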
5418 
5419 int
5420 dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
5421 {
5422 	dhd_bus_t *bus = dhdp->bus;
5423 	int bcmerror = 0;
5424 	unsigned long flags;
5425 	unsigned long flags_bus;
5426 #ifdef CONFIG_ARCH_MSM
5427 	int retry = POWERUP_MAX_RETRY;
5428 #endif /* CONFIG_ARCH_MSM */
5429 
5430 	if (flag == TRUE) { /* Turn off WLAN */
5431 		/* Removing Power */
5432 		DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
5433 		DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
5434 		bus->dhd->up = FALSE;
5435 
5436 		/* Wait for other contexts to finish -- if required, a call
5437 		 * to OSL_DELAY for 1 s can be added to give other contexts
5438 		 * a chance to finish.
5439 		 */
5440 		dhdpcie_advertise_bus_cleanup(bus->dhd);
5441 
5442 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
5443 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5444 			atomic_set(&bus->dhd->block_bus, TRUE);
5445 			dhd_flush_rx_tx_wq(bus->dhd);
5446 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5447 
5448 #ifdef BCMPCIE_OOB_HOST_WAKE
5449 			/* Clean up any pending host wake IRQ */
5450 			dhd_bus_oob_intr_set(bus->dhd, FALSE);
5451 			dhd_bus_oob_intr_unregister(bus->dhd);
5452 #endif /* BCMPCIE_OOB_HOST_WAKE */
5453 			dhd_os_wd_timer(dhdp, 0);
5454 			dhd_bus_stop(bus, TRUE);
5455 			if (bus->intr) {
5456 				DHD_BUS_LOCK(bus->bus_lock, flags_bus);
5457 				dhdpcie_bus_intr_disable(bus);
5458 				DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
5459 				dhdpcie_free_irq(bus);
5460 			}
5461 			dhd_deinit_bus_lock(bus);
5462 			dhd_deinit_backplane_access_lock(bus);
5463 			dhd_bus_release_dongle(bus);
5464 			dhdpcie_bus_free_resource(bus);
5465 			bcmerror = dhdpcie_bus_disable_device(bus);
5466 			if (bcmerror) {
5467 				DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
5468 					__FUNCTION__, bcmerror));
5469 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5470 				atomic_set(&bus->dhd->block_bus, FALSE);
5471 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5472 			}
5473 			/* Clean up protocol data after the Bus Master Enable bit is
5474 			 * cleared, so that the host can safely unmap DMA and remove the
5475 			 * allocated buffers from the PKTID MAP. Some Application
5476 			 * Processors with a System MMU trigger a kernel panic when they
5477 			 * detect an access to DMA-unmapped memory from a device that
5478 			 * sits behind the System MMU. Such a panic is possible here
5479 			 * because the dongle may access DMA-unmapped memory after
5480 			 * dhd_prot_reset() is called.
5481 			 * For this reason, the dhd_prot_reset() and dhd_clear() calls
5482 			 * should be located after the dhdpcie_bus_disable_device() call.
5483 			 */
5484 			dhd_prot_reset(dhdp);
5485 			dhd_clear(dhdp);
5486 #ifdef CONFIG_ARCH_MSM
5487 			bcmerror = dhdpcie_bus_clock_stop(bus);
5488 			if (bcmerror) {
5489 				DHD_ERROR(("%s: host clock stop failed: %d\n",
5490 					__FUNCTION__, bcmerror));
5491 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5492 				atomic_set(&bus->dhd->block_bus, FALSE);
5493 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5494 				goto done;
5495 			}
5496 #endif /* CONFIG_ARCH_MSM */
5497 			DHD_GENERAL_LOCK(bus->dhd, flags);
5498 			DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
5499 			bus->dhd->busstate = DHD_BUS_DOWN;
5500 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
5501 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5502 			atomic_set(&bus->dhd->block_bus, FALSE);
5503 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5504 		} else {
5505 			if (bus->intr) {
5506 				dhdpcie_free_irq(bus);
5507 			}
5508 #ifdef BCMPCIE_OOB_HOST_WAKE
5509 			/* Clean up any pending host wake IRQ */
5510 			dhd_bus_oob_intr_set(bus->dhd, FALSE);
5511 			dhd_bus_oob_intr_unregister(bus->dhd);
5512 #endif /* BCMPCIE_OOB_HOST_WAKE */
5513 			dhd_dpc_kill(bus->dhd);
5514 			if (!bus->no_bus_init) {
5515 				dhd_bus_release_dongle(bus);
5516 				dhdpcie_bus_free_resource(bus);
5517 				bcmerror = dhdpcie_bus_disable_device(bus);
5518 				if (bcmerror) {
5519 					DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
5520 						__FUNCTION__, bcmerror));
5521 				}
5522 
5523 				/* Clean up protocol data after the Bus Master Enable bit
5524 				 * is cleared, so that the host can safely unmap DMA and
5525 				 * remove the allocated buffers from the PKTID MAP. Some
5526 				 * Application Processors with a System MMU trigger a
5527 				 * kernel panic when they detect an access to DMA-unmapped
5528 				 * memory from a device that sits behind the System MMU.
5529 				 * Such a panic is possible here because the dongle may
5530 				 * access DMA-unmapped memory after dhd_prot_reset() is
5531 				 * called. For this reason, the dhd_prot_reset() and
5532 				 * dhd_clear() calls should be located after the
5533 				 * dhdpcie_bus_disable_device() call.
5534 				 */
5535 				dhd_prot_reset(dhdp);
5536 				dhd_clear(dhdp);
5537 			} else {
5538 				bus->no_bus_init = FALSE;
5539 			}
5540 #ifdef CONFIG_ARCH_MSM
5541 			bcmerror = dhdpcie_bus_clock_stop(bus);
5542 			if (bcmerror) {
5543 				DHD_ERROR(("%s: host clock stop failed: %d\n",
5544 					__FUNCTION__, bcmerror));
5545 				goto done;
5546 			}
5547 #endif  /* CONFIG_ARCH_MSM */
5548 		}
5549 
5550 		bus->dhd->dongle_reset = TRUE;
5551 		DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__));
5552 
5553 	} else { /* Turn on WLAN */
5554 		if (bus->dhd->busstate == DHD_BUS_DOWN) {
5555 			/* Powering On */
5556 			DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
5557 #ifdef CONFIG_ARCH_MSM
5558 			while (--retry) {
5559 				bcmerror = dhdpcie_bus_clock_start(bus);
5560 				if (!bcmerror) {
5561 					DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
5562 						__FUNCTION__));
5563 					break;
5564 				} else {
5565 					OSL_SLEEP(10);
5566 				}
5567 			}
5568 
5569 			if (bcmerror && !retry) {
5570 				DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
5571 					__FUNCTION__, bcmerror));
5572 				goto done;
5573 			}
5574 #if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
5575 			dhd_bus_aspm_enable_rc_ep(bus, FALSE);
5576 #endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
5577 #endif /* CONFIG_ARCH_MSM */
5578 			bus->is_linkdown = 0;
5579 			bus->cto_triggered = 0;
5580 #ifdef SUPPORT_LINKDOWN_RECOVERY
5581 			bus->read_shm_fail = FALSE;
5582 #endif /* SUPPORT_LINKDOWN_RECOVERY */
5583 			bcmerror = dhdpcie_bus_enable_device(bus);
5584 			if (bcmerror) {
5585 				DHD_ERROR(("%s: host configuration restore failed: %d\n",
5586 					__FUNCTION__, bcmerror));
5587 				goto done;
5588 			}
5589 
5590 			bcmerror = dhdpcie_bus_alloc_resource(bus);
5591 			if (bcmerror) {
5592 				DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
5593 					__FUNCTION__, bcmerror));
5594 				goto done;
5595 			}
5596 
5597 			bcmerror = dhdpcie_bus_dongle_attach(bus);
5598 			if (bcmerror) {
5599 				DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
5600 					__FUNCTION__, bcmerror));
5601 				goto done;
5602 			}
5603 
5604 			bcmerror = dhd_bus_request_irq(bus);
5605 			if (bcmerror) {
5606 				DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
5607 					__FUNCTION__, bcmerror));
5608 				goto done;
5609 			}
5610 
5611 			bus->dhd->dongle_reset = FALSE;
5612 
5613 #if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
5614 			dhd_irq_set_affinity(bus->dhd, cpumask_of(1));
5615 #endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
5616 
5617 			bcmerror = dhd_bus_start(dhdp);
5618 			if (bcmerror) {
5619 				DHD_ERROR(("%s: dhd_bus_start: %d\n",
5620 					__FUNCTION__, bcmerror));
5621 				goto done;
5622 			}
5623 
5624 			bus->dhd->up = TRUE;
5625 			/* Re-enable the watchdog, which was disabled in dhdpcie_advertise_bus_cleanup */
5626 			if (bus->dhd->dhd_watchdog_ms_backup) {
5627 				DHD_ERROR(("%s: Enabling wdtick after dhd init\n",
5628 					__FUNCTION__));
5629 				dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
5630 			}
5631 			DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
5632 		} else {
5633 			DHD_ERROR(("%s: Bus is not down, ignoring power on request\n", __FUNCTION__));
5634 			goto done;
5635 		}
5636 	}
5637 
5638 done:
5639 	if (bcmerror) {
5640 		DHD_GENERAL_LOCK(bus->dhd, flags);
5641 		DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
5642 		bus->dhd->busstate = DHD_BUS_DOWN;
5643 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
5644 	}
5645 	return bcmerror;
5646 }
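/*
 * Usage sketch for the flag argument (see the IOV_DEVRESET handler in
 * dhdpcie_bus_doiovar() below):
 *
 *   dhd_bus_devreset(dhdp, TRUE);   // WLAN off: stop bus, free IRQ/resources
 *   dhd_bus_devreset(dhdp, FALSE);  // WLAN on: re-enable device, reattach
 *                                   // dongle, re-request IRQ, restart bus
 */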
5647 
5648 #ifdef DHD_PCIE_REG_ACCESS
5649 static int
5650 pcie2_mdioop(dhd_bus_t *bus, uint physmedia, uint regaddr, bool write, uint *val,
5651 	bool slave_bypass)
5652 {
5653 	uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl;
5654 	uint32 reg32;
5655 
5656 	pcie2_mdiosetblock(bus, physmedia);
5657 
5658 	/* enable mdio access to SERDES */
5659 	mdio_ctrl = MDIOCTL2_DIVISOR_VAL;
5660 	mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF);
5661 
5662 	if (slave_bypass)
5663 		mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS;
5664 
5665 	if (!write)
5666 		mdio_ctrl |= MDIOCTL2_READ;
5667 
5668 	si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdio_ctrl);
5669 
5670 	if (write) {
5671 		reg32 =  PCIE2_MDIO_WR_DATA;
5672 		si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0,
5673 			*val | MDIODATA2_DONE);
5674 	} else
5675 		reg32 =  PCIE2_MDIO_RD_DATA;
5676 
5677 	/* retry till the transaction is complete */
5678 	while (i < pcie_serdes_spinwait) {
5679 		uint done_val =  si_corereg(bus->sih, bus->sih->buscoreidx, reg32, 0, 0);
5680 		if (!(done_val & MDIODATA2_DONE)) {
5681 			if (!write) {
5682 				*val = si_corereg(bus->sih, bus->sih->buscoreidx,
5683 					PCIE2_MDIO_RD_DATA, 0, 0);
5684 				*val = *val & MDIODATA2_MASK;
5685 			}
5686 			return 0;
5687 		}
5688 		OSL_DELAY(1000);
5689 		i++;
5690 	}
5691 	return -1;
5692 }
5693 #endif /* DHD_PCIE_REG_ACCESS */
5694 
5695 /* si_backplane_access() manages a shared resource - the BAR0 mapping - hence
5696  * its calls shall be serialized. This wrapper provides such serialization and
5697  * shall be used everywhere instead of direct calls of si_backplane_access().
5698  *
5699  * The Linux DHD driver calls si_backplane_access() from three contexts: tasklet
5700  * (which may reach it via dhdpcie_sssr_dump()), iovar
5701  * ("sbreg", "membytes", etc.) and procfs (used by the GDB proxy). To avoid race
5702  * conditions, calls of si_backplane_access() shall be serialized. Presence of
5703  * the tasklet context implies that serialization shall be based on a spinlock.
5704  * Hence the Linux implementation of dhd_pcie_backplane_access_[un]lock() is
5705  * spinlock-based.
5706  *
5707  * Other platforms may add their own implementations of
5708  * dhd_pcie_backplane_access_[un]lock() as needed (e.g. if serialization is not
5709  * needed, the implementation might be empty).
5710  */
5711 static uint
5712 serialized_backplane_access(dhd_bus_t *bus, uint addr, uint size, uint *val, bool read)
5713 {
5714 	uint ret;
5715 	unsigned long flags;
5716 	DHD_BACKPLANE_ACCESS_LOCK(bus->backplane_access_lock, flags);
5717 	ret = si_backplane_access(bus->sih, addr, size, val, read);
5718 	DHD_BACKPLANE_ACCESS_UNLOCK(bus->backplane_access_lock, flags);
5719 	return ret;
5720 }
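/*
 * Example use of the wrapper (a sketch; the IOV_SBREG handler below follows
 * the same pattern when reading a backplane register):
 *
 *   uint v;
 *   if (serialized_backplane_access(bus, addr, sizeof(v), &v, TRUE) != BCME_OK)
 *           DHD_ERROR(("Invalid size/addr combination\n"));
 */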
5721 
5722 static int
5723 dhdpcie_get_dma_ring_indices(dhd_pub_t *dhd)
5724 {
5725 	int h2d_support, d2h_support;
5726 
5727 	d2h_support = dhd->dma_d2h_ring_upd_support ? 1 : 0;
5728 	h2d_support = dhd->dma_h2d_ring_upd_support ? 1 : 0;
5729 	return (d2h_support | (h2d_support << 1));
5730 
5731 }
5732 int
5733 dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val)
5734 {
5735 	int bcmerror = 0;
5736 	/* Can change it only during initialization/FW download */
5737 	if (dhd->busstate == DHD_BUS_DOWN) {
5738 		if ((int_val > 3) || (int_val < 0)) {
5739 			DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
5740 			bcmerror = BCME_BADARG;
5741 		} else {
5742 			dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
5743 			dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
5744 			dhd->dma_ring_upd_overwrite = TRUE;
5745 		}
5746 	} else {
5747 		DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5748 			__FUNCTION__));
5749 		bcmerror = BCME_NOTDOWN;
5750 	}
5751 
5752 	return bcmerror;
5753 
5754 }
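/*
 * DMA ring index encoding shared by the get/set functions above
 * (bit 0 = D2H ring update support, bit 1 = H2D ring update support):
 *
 *   0 - no DMA index support       2 - H2D only
 *   1 - D2H only                   3 - both D2H and H2D
 */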
5755 /**
5756  * IOVAR handler of the DHD bus layer (in this case, the PCIe bus).
5757  *
5758  * @param actionid  e.g. IOV_SVAL(IOV_PCIEREG)
5759  * @param params    input buffer
5760  * @param plen      length in [bytes] of input buffer 'params'
5761  * @param arg       output buffer
5762  * @param len       length in [bytes] of output buffer 'arg'
5763  */
5764 static int
5765 dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
5766                 void *params, int plen, void *arg, int len, int val_size)
5767 {
5768 	int bcmerror = 0;
5769 	int32 int_val = 0;
5770 	int32 int_val2 = 0;
5771 	int32 int_val3 = 0;
5772 	bool bool_val = 0;
5773 
5774 	DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
5775 	           __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
5776 
5777 	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
5778 		goto exit;
5779 
5780 	if (plen >= (int)sizeof(int_val))
5781 		bcopy(params, &int_val, sizeof(int_val));
5782 
5783 	if (plen >= (int)sizeof(int_val) * 2)
5784 		bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
5785 
5786 	if (plen >= (int)sizeof(int_val) * 3)
5787 		bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3));
5788 
5789 	bool_val = (int_val != 0) ? TRUE : FALSE;
5790 
5791 	/* Check if dongle is in reset. If so, only allow DEVRESET iovars */
5792 	if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
5793 	                                actionid == IOV_GVAL(IOV_DEVRESET))) {
5794 		bcmerror = BCME_NOTREADY;
5795 		goto exit;
5796 	}
5797 
5798 	switch (actionid) {
5799 
5800 	case IOV_SVAL(IOV_VARS):
5801 		bcmerror = dhdpcie_downloadvars(bus, arg, len);
5802 		break;
5803 #ifdef DHD_PCIE_REG_ACCESS
5804 	case IOV_SVAL(IOV_PCIEREG):
5805 		si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
5806 			int_val);
5807 		si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
5808 			int_val2);
5809 		break;
5810 
5811 	case IOV_GVAL(IOV_PCIEREG):
5812 		si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
5813 			int_val);
5814 		int_val = si_corereg(bus->sih, bus->sih->buscoreidx,
5815 			OFFSETOF(sbpcieregs_t, configdata), 0, 0);
5816 		bcopy(&int_val, arg, sizeof(int_val));
5817 		break;
5818 
5819 	case IOV_SVAL(IOV_PCIECOREREG):
5820 		si_corereg(bus->sih, bus->sih->buscoreidx, int_val, ~0, int_val2);
5821 		break;
5822 	case IOV_GVAL(IOV_BAR0_SECWIN_REG):
5823 	{
5824 		sdreg_t sdreg;
5825 		uint32 addr, size;
5826 
5827 		bcopy(params, &sdreg, sizeof(sdreg));
5828 
5829 		addr = sdreg.offset;
5830 		size = sdreg.func;
5831 
5832 		if (serialized_backplane_access(bus, addr, size, (uint *)&int_val, TRUE) != BCME_OK)
5833 		{
5834 			DHD_ERROR(("Invalid size/addr combination \n"));
5835 			bcmerror = BCME_ERROR;
5836 			break;
5837 		}
5838 		bcopy(&int_val, arg, sizeof(int32));
5839 		break;
5840 	}
5841 
5842 	case IOV_SVAL(IOV_BAR0_SECWIN_REG):
5843 	{
5844 		sdreg_t sdreg;
5845 		uint32 addr, size;
5846 
5847 		bcopy(params, &sdreg, sizeof(sdreg));
5848 
5849 		addr = sdreg.offset;
5850 		size = sdreg.func;
5851 		if (serialized_backplane_access(bus, addr, size,
5852 			(uint *)(&sdreg.value), FALSE) != BCME_OK) {
5853 			DHD_ERROR(("Invalid size/addr combination \n"));
5854 			bcmerror = BCME_ERROR;
5855 		}
5856 		break;
5857 	}
5858 
5859 	case IOV_GVAL(IOV_SBREG):
5860 	{
5861 		sdreg_t sdreg;
5862 		uint32 addr, size;
5863 
5864 		bcopy(params, &sdreg, sizeof(sdreg));
5865 
5866 		addr = sdreg.offset | SI_ENUM_BASE(bus->sih);
5867 		size = sdreg.func;
5868 
5869 		if (serialized_backplane_access(bus, addr, size, (uint *)&int_val, TRUE) != BCME_OK)
5870 		{
5871 			DHD_ERROR(("Invalid size/addr combination \n"));
5872 			bcmerror = BCME_ERROR;
5873 			break;
5874 		}
5875 		bcopy(&int_val, arg, size);
5876 		break;
5877 	}
5878 
5879 	case IOV_SVAL(IOV_SBREG):
5880 	{
5881 		sdreg_t sdreg;
5882 		uint32 addr, size;
5883 
5884 		bcopy(params, &sdreg, sizeof(sdreg));
5885 
5886 		addr = sdreg.offset | SI_ENUM_BASE(bus->sih);
5887 		size = sdreg.func;
5888 		if (serialized_backplane_access(bus, addr, size,
5889 			(uint *)(&sdreg.value), FALSE) != BCME_OK) {
5890 			DHD_ERROR(("Invalid size/addr combination \n"));
5891 			bcmerror = BCME_ERROR;
5892 		}
5893 		break;
5894 	}
5895 
5896 	case IOV_GVAL(IOV_PCIESERDESREG):
5897 	{
5898 		uint val;
5899 		if (!PCIE_GEN2(bus->sih)) {
5900 			DHD_ERROR(("supported only in pcie gen2\n"));
5901 			bcmerror = BCME_ERROR;
5902 			break;
5903 		}
5904 
5905 		if (!pcie2_mdioop(bus, int_val, int_val2, FALSE, &val, FALSE)) {
5906 			bcopy(&val, arg, sizeof(int32));
5907 		} else {
5908 			DHD_ERROR(("pcie2_mdioop failed.\n"));
5909 			bcmerror = BCME_ERROR;
5910 		}
5911 		break;
5912 	}
5913 
5914 	case IOV_SVAL(IOV_PCIESERDESREG):
5915 		if (!PCIE_GEN2(bus->sih)) {
5916 			DHD_ERROR(("supported only in pcie gen2\n"));
5917 			bcmerror = BCME_ERROR;
5918 			break;
5919 		}
5920 		if (pcie2_mdioop(bus, int_val, int_val2, TRUE, (uint *)&int_val3, FALSE)) {
5921 			DHD_ERROR(("pcie2_mdioop failed.\n"));
5922 			bcmerror = BCME_ERROR;
5923 		}
5924 		break;
5925 	case IOV_GVAL(IOV_PCIECOREREG):
5926 		int_val = si_corereg(bus->sih, bus->sih->buscoreidx, int_val, 0, 0);
5927 		bcopy(&int_val, arg, sizeof(int_val));
5928 		break;
5929 
5930 	case IOV_SVAL(IOV_PCIECFGREG):
5931 		OSL_PCI_WRITE_CONFIG(bus->osh, int_val, 4, int_val2);
5932 		break;
5933 
5934 	case IOV_GVAL(IOV_PCIECFGREG):
5935 		int_val = OSL_PCI_READ_CONFIG(bus->osh, int_val, 4);
5936 		bcopy(&int_val, arg, sizeof(int_val));
5937 		break;
5938 #endif /* DHD_PCIE_REG_ACCESS */
5939 	case IOV_SVAL(IOV_PCIE_LPBK):
5940 		bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
5941 		break;
5942 
5943 	case IOV_SVAL(IOV_PCIE_DMAXFER): {
5944 		dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)arg;
5945 
5946 		if (!dmaxfer)
5947 			return BCME_BADARG;
5948 		if (dmaxfer->version != DHD_DMAXFER_VERSION)
5949 			return BCME_VERSION;
5950 		if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
5951 			return BCME_BADLEN;
5952 		}
5953 
5954 		bcmerror = dhdpcie_bus_dmaxfer_req(bus, dmaxfer->num_bytes,
5955 				dmaxfer->src_delay, dmaxfer->dest_delay,
5956 				dmaxfer->type, dmaxfer->core_num,
5957 				dmaxfer->should_wait);
5958 
5959 		if (dmaxfer->should_wait && bcmerror >= 0) {
5960 			bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
5961 		}
5962 		break;
5963 	}
5964 
5965 	case IOV_GVAL(IOV_PCIE_DMAXFER): {
5966 		dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)params;
5967 		if (!dmaxfer)
5968 			return BCME_BADARG;
5969 		if (dmaxfer->version != DHD_DMAXFER_VERSION)
5970 			return BCME_VERSION;
5971 		if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
5972 			return BCME_BADLEN;
5973 		}
5974 		bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
5975 		break;
5976 	}
5977 
5978 	case IOV_GVAL(IOV_PCIE_SUSPEND):
5979 		int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
5980 		bcopy(&int_val, arg, val_size);
5981 		break;
5982 
5983 	case IOV_SVAL(IOV_PCIE_SUSPEND):
5984 		if (bool_val) { /* Suspend */
5985 			int ret;
5986 			unsigned long flags;
5987 
5988 			/*
5989 			 * If some other context is busy, wait until they are done,
5990 			 * before starting suspend
5991 			 */
5992 			ret = dhd_os_busbusy_wait_condition(bus->dhd,
5993 				&bus->dhd->dhd_bus_busy_state, DHD_BUS_BUSY_IN_DHD_IOVAR);
5994 			if (ret == 0) {
5995 				DHD_ERROR(("%s: Wait timed out, dhd_bus_busy_state = 0x%x\n",
5996 					__FUNCTION__, bus->dhd->dhd_bus_busy_state));
5997 				return BCME_BUSY;
5998 			}
5999 
6000 			DHD_GENERAL_LOCK(bus->dhd, flags);
6001 			DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
6002 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
6003 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6004 			dhdpcie_bus_suspend(bus, TRUE, TRUE);
6005 #else
6006 			dhdpcie_bus_suspend(bus, TRUE);
6007 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
6008 
6009 			DHD_GENERAL_LOCK(bus->dhd, flags);
6010 			DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
6011 			dhd_os_busbusy_wake(bus->dhd);
6012 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
6013 		} else { /* Resume */
6014 			unsigned long flags;
6015 			DHD_GENERAL_LOCK(bus->dhd, flags);
6016 			DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
6017 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
6018 
6019 			dhdpcie_bus_suspend(bus, FALSE);
6020 
6021 			DHD_GENERAL_LOCK(bus->dhd, flags);
6022 			DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
6023 			dhd_os_busbusy_wake(bus->dhd);
6024 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
6025 		}
6026 		break;
6027 
6028 	case IOV_GVAL(IOV_MEMSIZE):
6029 		int_val = (int32)bus->ramsize;
6030 		bcopy(&int_val, arg, val_size);
6031 		break;
6032 #ifdef DHD_BUS_MEM_ACCESS
6033 	case IOV_SVAL(IOV_MEMBYTES):
6034 	case IOV_GVAL(IOV_MEMBYTES):
6035 	{
6036 		uint32 address;		/* absolute backplane address */
6037 		uint size, dsize;
6038 		uint8 *data;
6039 
6040 		bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
6041 
6042 		ASSERT(plen >= 2*sizeof(int));
6043 
6044 		address = (uint32)int_val;
6045 		bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
6046 		size = (uint)int_val;
6047 
6048 		/* Do some validation */
6049 		dsize = set ? plen - (2 * sizeof(int)) : len;
6050 		if (dsize < size) {
6051 			DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
6052 			           __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
6053 			bcmerror = BCME_BADARG;
6054 			break;
6055 		}
6056 
6057 		DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x, dsize %d\n", __FUNCTION__,
6058 		          (set ? "write" : "read"), size, address, dsize));
6059 
6060 		/* check if CR4 */
6061 		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
6062 		    si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
6063 			/* if address is 0, store the reset instruction to be written in 0 */
6064 			if (set && address == bus->dongle_ram_base) {
6065 				bus->resetinstr = *(((uint32*)params) + 2);
6066 			}
6067 		} else {
6068 		/* If we know about SOCRAM, check for a fit */
6069 		if ((bus->orig_ramsize) &&
6070 		    ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize)))
6071 		{
6072 			uint8 enable, protect, remap;
6073 			si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
6074 			if (!enable || protect) {
6075 				DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n",
6076 					__FUNCTION__, bus->orig_ramsize, size, address));
6077 				DHD_ERROR(("%s: socram enable %d, protect %d\n",
6078 					__FUNCTION__, enable, protect));
6079 				bcmerror = BCME_BADARG;
6080 				break;
6081 			}
6082 
6083 			if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) {
6084 				uint32 devramsize = si_socdevram_size(bus->sih);
6085 				if ((address < SOCDEVRAM_ARM_ADDR) ||
6086 					(address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) {
6087 					DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n",
6088 						__FUNCTION__, address, size));
6089 					DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n",
6090 						__FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize));
6091 					bcmerror = BCME_BADARG;
6092 					break;
6093 				}
6094 				/* move it such that address is real now */
6095 				address -= SOCDEVRAM_ARM_ADDR;
6096 				address += SOCDEVRAM_BP_ADDR;
6097 				DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n",
6098 					__FUNCTION__, (set ? "write" : "read"), size, address));
6099 			} else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) {
6100 				/* Can not access remap region while devram remap bit is set
6101 				 * ROM content would be returned in this case
6102 				 */
6103 				DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n",
6104 					__FUNCTION__, address));
6105 				bcmerror = BCME_ERROR;
6106 				break;
6107 			}
6108 		}
6109 		}
6110 
6111 		/* Generate the actual data pointer */
6112 		data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
6113 
6114 		/* Call to do the transfer */
6115 		bcmerror = dhdpcie_bus_membytes(bus, set, address, data, size);
6116 
6117 		break;
6118 	}
6119 #endif /* DHD_BUS_MEM_ACCESS */
6120 
6121 	/* Debug related. Dumps core registers or one of the dongle memory */
6122 	case IOV_GVAL(IOV_DUMP_DONGLE):
6123 	{
6124 		dump_dongle_in_t ddi = *(dump_dongle_in_t*)params;
6125 		dump_dongle_out_t *ddo = (dump_dongle_out_t*)arg;
6126 		uint32 *p = ddo->val;
6127 		const uint max_offset = 4096 - 1; /* one core contains max 4096/4 registers */
6128 
6129 		if (plen < sizeof(ddi) || len < sizeof(ddo)) {
6130 			bcmerror = BCME_BADARG;
6131 			break;
6132 		}
6133 
6134 		switch (ddi.type) {
6135 		case DUMP_DONGLE_COREREG:
6136 			ddo->n_bytes = 0;
6137 
6138 			if (si_setcoreidx(bus->sih, ddi.index) == NULL) {
6139 				break; // beyond last core: core enumeration ended
6140 			}
6141 
6142 			ddo->address = si_addrspace(bus->sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
6143 			ddo->address += ddi.offset; // BP address at which this dump starts
6144 
6145 			ddo->id = si_coreid(bus->sih);
6146 			ddo->rev = si_corerev(bus->sih);
6147 
6148 			while (ddi.offset < max_offset &&
6149 				sizeof(dump_dongle_out_t) + ddo->n_bytes < (uint)len) {
6150 				*p++ = si_corereg(bus->sih, ddi.index, ddi.offset, 0, 0);
6151 				ddi.offset += sizeof(uint32);
6152 				ddo->n_bytes += sizeof(uint32);
6153 			}
6154 			break;
6155 		default:
6156 			// TODO: implement d11 SHM/TPL dumping
6157 			bcmerror = BCME_BADARG;
6158 			break;
6159 		}
6160 		break;
6161 	}
6162 
6163 	/* Debug related. Returns a string with dongle capabilities */
6164 	case IOV_GVAL(IOV_DNGL_CAPS):
6165 	{
6166 		strncpy(arg, bus->dhd->fw_capabilities,
6167 			MIN(strlen(bus->dhd->fw_capabilities), (size_t)len));
6168 		((char*)arg)[len - 1] = '\0';
6169 		break;
6170 	}
6171 
6172 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
6173 	case IOV_SVAL(IOV_GDB_SERVER):
6174 		/* debugger_*() functions may sleep, so cannot hold spinlock */
6175 		DHD_PERIM_UNLOCK(bus->dhd);
6176 		if (int_val > 0) {
6177 			debugger_init((void *) bus, &bus_ops, int_val, SI_ENUM_BASE(bus->sih));
6178 		} else {
6179 			debugger_close();
6180 		}
6181 		DHD_PERIM_LOCK(bus->dhd);
6182 		break;
6183 #endif /* DEBUGGER || DHD_DSCOPE */
6184 
6185 #ifdef BCM_BUZZZ
6186 	/* Dump dongle side buzzz trace to console */
6187 	case IOV_GVAL(IOV_BUZZZ_DUMP):
6188 		bcmerror = dhd_buzzz_dump_dngl(bus);
6189 		break;
6190 #endif /* BCM_BUZZZ */
6191 
6192 	case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
6193 		bcmerror = dhdpcie_bus_download_state(bus, bool_val);
6194 		break;
6195 
6196 	case IOV_GVAL(IOV_RAMSIZE):
6197 		int_val = (int32)bus->ramsize;
6198 		bcopy(&int_val, arg, val_size);
6199 		break;
6200 
6201 	case IOV_SVAL(IOV_RAMSIZE):
6202 		bus->ramsize = int_val;
6203 		bus->orig_ramsize = int_val;
6204 		break;
6205 
6206 	case IOV_GVAL(IOV_RAMSTART):
6207 		int_val = (int32)bus->dongle_ram_base;
6208 		bcopy(&int_val, arg, val_size);
6209 		break;
6210 
6211 	case IOV_GVAL(IOV_CC_NVMSHADOW):
6212 	{
6213 		struct bcmstrbuf dump_b;
6214 
6215 		bcm_binit(&dump_b, arg, len);
6216 		bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);
6217 		break;
6218 	}
6219 
6220 	case IOV_GVAL(IOV_SLEEP_ALLOWED):
6221 		bool_val = bus->sleep_allowed;
6222 		bcopy(&bool_val, arg, val_size);
6223 		break;
6224 
6225 	case IOV_SVAL(IOV_SLEEP_ALLOWED):
6226 		bus->sleep_allowed = bool_val;
6227 		break;
6228 
6229 	case IOV_GVAL(IOV_DONGLEISOLATION):
6230 		int_val = bus->dhd->dongle_isolation;
6231 		bcopy(&int_val, arg, val_size);
6232 		break;
6233 
6234 	case IOV_SVAL(IOV_DONGLEISOLATION):
6235 		bus->dhd->dongle_isolation = bool_val;
6236 		break;
6237 
6238 	case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
6239 		int_val = bus->ltrsleep_on_unload;
6240 		bcopy(&int_val, arg, val_size);
6241 		break;
6242 
6243 	case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
6244 		bus->ltrsleep_on_unload = bool_val;
6245 		break;
6246 
6247 	case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK):
6248 	{
6249 		struct bcmstrbuf dump_b;
6250 		bcm_binit(&dump_b, arg, len);
6251 		bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);
6252 		break;
6253 	}
6254 	case IOV_GVAL(IOV_DMA_RINGINDICES):
6255 	{
6256 		int_val = dhdpcie_get_dma_ring_indices(bus->dhd);
6257 		bcopy(&int_val, arg, sizeof(int_val));
6258 		break;
6259 	}
6260 	case IOV_SVAL(IOV_DMA_RINGINDICES):
6261 		bcmerror = dhdpcie_set_dma_ring_indices(bus->dhd, int_val);
6262 		break;
6263 
6264 	case IOV_GVAL(IOV_METADATA_DBG):
6265 		int_val = dhd_prot_metadata_dbg_get(bus->dhd);
6266 		bcopy(&int_val, arg, val_size);
6267 		break;
6268 	case IOV_SVAL(IOV_METADATA_DBG):
6269 		dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0));
6270 		break;
6271 
6272 	case IOV_GVAL(IOV_RX_METADATALEN):
6273 		int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
6274 		bcopy(&int_val, arg, val_size);
6275 		break;
6276 
6277 	case IOV_SVAL(IOV_RX_METADATALEN):
6278 		if (int_val > 64) {
6279 			bcmerror = BCME_BUFTOOLONG;
6280 			break;
6281 		}
6282 		dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);
6283 		break;
6284 
6285 	case IOV_SVAL(IOV_TXP_THRESHOLD):
6286 		dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);
6287 		break;
6288 
6289 	case IOV_GVAL(IOV_TXP_THRESHOLD):
6290 		int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
6291 		bcopy(&int_val, arg, val_size);
6292 		break;
6293 
6294 	case IOV_SVAL(IOV_DB1_FOR_MB):
6295 		if (int_val)
6296 			bus->db1_for_mb = TRUE;
6297 		else
6298 			bus->db1_for_mb = FALSE;
6299 		break;
6300 
6301 	case IOV_GVAL(IOV_DB1_FOR_MB):
6302 		if (bus->db1_for_mb)
6303 			int_val = 1;
6304 		else
6305 			int_val = 0;
6306 		bcopy(&int_val, arg, val_size);
6307 		break;
6308 
6309 	case IOV_GVAL(IOV_TX_METADATALEN):
6310 		int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
6311 		bcopy(&int_val, arg, val_size);
6312 		break;
6313 
6314 	case IOV_SVAL(IOV_TX_METADATALEN):
6315 		if (int_val > 64) {
6316 			bcmerror = BCME_BUFTOOLONG;
6317 			break;
6318 		}
6319 		dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
6320 		break;
6321 
6322 	case IOV_SVAL(IOV_DEVRESET):
6323 		switch (int_val) {
6324 			case DHD_BUS_DEVRESET_ON:
6325 				bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
6326 				break;
6327 			case DHD_BUS_DEVRESET_OFF:
6328 				bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
6329 				break;
6330 			case DHD_BUS_DEVRESET_FLR:
6331 				bcmerror = dhd_bus_perform_flr(bus, bus->flr_force_fail);
6332 				break;
6333 			case DHD_BUS_DEVRESET_FLR_FORCE_FAIL:
6334 				bus->flr_force_fail = TRUE;
6335 				break;
6336 			default:
6337 				DHD_ERROR(("%s: invalid argument for devreset\n", __FUNCTION__));
6338 				break;
6339 		}
6340 		break;
6341 	case IOV_SVAL(IOV_FORCE_FW_TRAP):
6342 		if (bus->dhd->busstate == DHD_BUS_DATA)
6343 			dhdpcie_fw_trap(bus);
6344 		else {
6345 			DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__));
6346 			bcmerror = BCME_NOTUP;
6347 		}
6348 		break;
6349 	case IOV_GVAL(IOV_FLOW_PRIO_MAP):
6350 		int_val = bus->dhd->flow_prio_map_type;
6351 		bcopy(&int_val, arg, val_size);
6352 		break;
6353 
6354 	case IOV_SVAL(IOV_FLOW_PRIO_MAP):
6355 		int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
6356 		bcopy(&int_val, arg, val_size);
6357 		break;
6358 
6359 #ifdef DHD_PCIE_RUNTIMEPM
6360 	case IOV_GVAL(IOV_IDLETIME):
6361 		if (!(bus->dhd->op_mode & DHD_FLAG_MFG_MODE)) {
6362 			int_val = bus->idletime;
6363 		} else {
6364 			int_val = 0;
6365 		}
6366 		bcopy(&int_val, arg, val_size);
6367 		break;
6368 
6369 	case IOV_SVAL(IOV_IDLETIME):
6370 		if (int_val < 0) {
6371 			bcmerror = BCME_BADARG;
6372 		} else {
6373 			bus->idletime = int_val;
6374 			if (bus->idletime) {
6375 				DHD_ENABLE_RUNTIME_PM(bus->dhd);
6376 			} else {
6377 				DHD_DISABLE_RUNTIME_PM(bus->dhd);
6378 			}
6379 		}
6380 		break;
6381 #endif /* DHD_PCIE_RUNTIMEPM */
6382 
6383 	case IOV_GVAL(IOV_TXBOUND):
6384 		int_val = (int32)dhd_txbound;
6385 		bcopy(&int_val, arg, val_size);
6386 		break;
6387 
6388 	case IOV_SVAL(IOV_TXBOUND):
6389 		dhd_txbound = (uint)int_val;
6390 		break;
6391 
6392 	case IOV_SVAL(IOV_H2D_MAILBOXDATA):
6393 		dhdpcie_send_mb_data(bus, (uint)int_val);
6394 		break;
6395 
6396 	case IOV_SVAL(IOV_INFORINGS):
6397 		dhd_prot_init_info_rings(bus->dhd);
6398 		break;
6399 
6400 	case IOV_SVAL(IOV_H2D_PHASE):
6401 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
6402 			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
6403 				__FUNCTION__));
6404 			bcmerror = BCME_NOTDOWN;
6405 			break;
6406 		}
6407 		if (int_val)
6408 			bus->dhd->h2d_phase_supported = TRUE;
6409 		else
6410 			bus->dhd->h2d_phase_supported = FALSE;
6411 		break;
6412 
6413 	case IOV_GVAL(IOV_H2D_PHASE):
6414 		int_val = (int32) bus->dhd->h2d_phase_supported;
6415 		bcopy(&int_val, arg, val_size);
6416 		break;
6417 
6418 	case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
6419 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
6420 			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
6421 				__FUNCTION__));
6422 			bcmerror = BCME_NOTDOWN;
6423 			break;
6424 		}
6425 		if (int_val)
6426 			bus->dhd->force_dongletrap_on_bad_h2d_phase = TRUE;
6427 		else
6428 			bus->dhd->force_dongletrap_on_bad_h2d_phase = FALSE;
6429 		break;
6430 
6431 	case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
6432 		int_val = (int32) bus->dhd->force_dongletrap_on_bad_h2d_phase;
6433 		bcopy(&int_val, arg, val_size);
6434 		break;
6435 
6436 	case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM):
6437 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
6438 			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
6439 				__FUNCTION__));
6440 			bcmerror = BCME_NOTDOWN;
6441 			break;
6442 		}
6443 		dhd_prot_set_h2d_max_txpost(bus->dhd, (uint16)int_val);
6444 		break;
6445 
6446 	case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM):
6447 		int_val = dhd_prot_get_h2d_max_txpost(bus->dhd);
6448 		bcopy(&int_val, arg, val_size);
6449 		break;
6450 
6451 	case IOV_GVAL(IOV_RXBOUND):
6452 		int_val = (int32)dhd_rxbound;
6453 		bcopy(&int_val, arg, val_size);
6454 		break;
6455 
6456 	case IOV_SVAL(IOV_RXBOUND):
6457 		dhd_rxbound = (uint)int_val;
6458 		break;
6459 
6460 	case IOV_GVAL(IOV_TRAPDATA):
6461 	{
6462 		struct bcmstrbuf dump_b;
6463 		bcm_binit(&dump_b, arg, len);
6464 		bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, FALSE);
6465 		break;
6466 	}
6467 
6468 	case IOV_GVAL(IOV_TRAPDATA_RAW):
6469 	{
6470 		struct bcmstrbuf dump_b;
6471 		bcm_binit(&dump_b, arg, len);
6472 		bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, TRUE);
6473 		break;
6474 	}
6475 #ifdef DHD_PCIE_REG_ACCESS
6476 	case IOV_GVAL(IOV_PCIEASPM): {
6477 		uint8 clkreq = 0;
6478 		uint32 aspm = 0;
6479 
6480 		/* this command is to hide the details, but match the lcreg
6481 		#define PCIE_CLKREQ_ENAB		0x100
6482 		#define PCIE_ASPM_L1_ENAB        	2
6483 		#define PCIE_ASPM_L0s_ENAB       	1
6484 		*/
6485 
6486 		clkreq = dhdpcie_clkreq(bus->dhd->osh, 0, 0);
6487 		aspm = dhdpcie_lcreg(bus->dhd->osh, 0, 0);
6488 
6489 		int_val = ((clkreq & 0x1) << 8) | (aspm & PCIE_ASPM_ENAB);
6490 		bcopy(&int_val, arg, val_size);
6491 		break;
6492 	}
6493 
6494 	case IOV_SVAL(IOV_PCIEASPM): {
6495 		uint32 tmp;
6496 
6497 		tmp = dhdpcie_lcreg(bus->dhd->osh, 0, 0);
6498 		dhdpcie_lcreg(bus->dhd->osh, PCIE_ASPM_ENAB,
6499 			(tmp & ~PCIE_ASPM_ENAB) | (int_val & PCIE_ASPM_ENAB));
6500 
6501 		dhdpcie_clkreq(bus->dhd->osh, 1, ((int_val & 0x100) >> 8));
6502 		break;
6503 	}
6504 #endif /* DHD_PCIE_REG_ACCESS */
6505 	case IOV_SVAL(IOV_HANGREPORT):
6506 		bus->dhd->hang_report = bool_val;
6507 		DHD_ERROR(("%s: Set hang_report as %d\n",
6508 			__FUNCTION__, bus->dhd->hang_report));
6509 		break;
6510 
6511 	case IOV_GVAL(IOV_HANGREPORT):
6512 		int_val = (int32)bus->dhd->hang_report;
6513 		bcopy(&int_val, arg, val_size);
6514 		break;
6515 
6516 	case IOV_SVAL(IOV_CTO_PREVENTION):
6517 		bcmerror = dhdpcie_cto_init(bus, bool_val);
6518 		break;
6519 
6520 	case IOV_GVAL(IOV_CTO_PREVENTION):
6521 		if (bus->sih->buscorerev < 19) {
6522 			bcmerror = BCME_UNSUPPORTED;
6523 			break;
6524 		}
6525 		int_val = (int32)bus->cto_enable;
6526 		bcopy(&int_val, arg, val_size);
6527 		break;
6528 
6529 	case IOV_SVAL(IOV_CTO_THRESHOLD):
6530 		{
6531 			if (bus->sih->buscorerev < 19) {
6532 				bcmerror = BCME_UNSUPPORTED;
6533 				break;
6534 			}
6535 			bus->cto_threshold = (uint32)int_val;
6536 		}
6537 		break;
6538 
6539 	case IOV_GVAL(IOV_CTO_THRESHOLD):
6540 		if (bus->sih->buscorerev < 19) {
6541 			bcmerror = BCME_UNSUPPORTED;
6542 			break;
6543 		}
6544 		if (bus->cto_threshold)
6545 			int_val = (int32)bus->cto_threshold;
6546 		else
6547 			int_val = (int32)PCIE_CTO_TO_THRESH_DEFAULT;
6548 
6549 		bcopy(&int_val, arg, val_size);
6550 		break;
6551 
6552 	case IOV_SVAL(IOV_PCIE_WD_RESET):
6553 		if (bool_val) {
6554 			/* Legacy chipcommon watchdog reset */
6555 			dhdpcie_cc_watchdog_reset(bus);
6556 		}
6557 		break;
6558 
6559 	case IOV_GVAL(IOV_HWA_ENAB_BMAP):
6560 		int_val = bus->hwa_enab_bmap;
6561 		bcopy(&int_val, arg, val_size);
6562 		break;
6563 	case IOV_SVAL(IOV_HWA_ENAB_BMAP):
6564 		bus->hwa_enab_bmap = (uint8)int_val;
6565 		break;
6566 	case IOV_GVAL(IOV_IDMA_ENABLE):
6567 		int_val = bus->idma_enabled;
6568 		bcopy(&int_val, arg, val_size);
6569 		break;
6570 	case IOV_SVAL(IOV_IDMA_ENABLE):
6571 		bus->idma_enabled = (bool)int_val;
6572 		break;
6573 	case IOV_GVAL(IOV_IFRM_ENABLE):
6574 		int_val = bus->ifrm_enabled;
6575 		bcopy(&int_val, arg, val_size);
6576 		break;
6577 	case IOV_SVAL(IOV_IFRM_ENABLE):
6578 		bus->ifrm_enabled = (bool)int_val;
6579 		break;
6580 	case IOV_GVAL(IOV_CLEAR_RING):
6581 		bcopy(&int_val, arg, val_size);
6582 		dhd_flow_rings_flush(bus->dhd, 0);
6583 		break;
6584 	case IOV_GVAL(IOV_DAR_ENABLE):
6585 		int_val = bus->dar_enabled;
6586 		bcopy(&int_val, arg, val_size);
6587 		break;
6588 	case IOV_SVAL(IOV_DAR_ENABLE):
6589 		bus->dar_enabled = (bool)int_val;
6590 		break;
6591 	case IOV_GVAL(IOV_HSCBSIZE):
6592 		bcmerror = dhd_get_hscb_info(bus->dhd, NULL, (uint32 *)arg);
6593 		break;
6594 #ifdef DHD_BUS_MEM_ACCESS
6595 	case IOV_GVAL(IOV_HSCBBYTES):
6596 		bcmerror = dhd_get_hscb_buff(bus->dhd, int_val, int_val2, (void*)arg);
6597 		break;
6598 #endif /* DHD_BUS_MEM_ACCESS */
6599 
6600 #ifdef DHD_HP2P
6601 	case IOV_SVAL(IOV_HP2P_ENABLE):
6602 		dhd_prot_hp2p_enable(bus->dhd, TRUE, int_val);
6603 		break;
6604 
6605 	case IOV_GVAL(IOV_HP2P_ENABLE):
6606 		int_val = dhd_prot_hp2p_enable(bus->dhd, FALSE, int_val);
6607 		bcopy(&int_val, arg, val_size);
6608 		break;
6609 
6610 	case IOV_SVAL(IOV_HP2P_PKT_THRESHOLD):
6611 		dhd_prot_pkt_threshold(bus->dhd, TRUE, int_val);
6612 		break;
6613 
6614 	case IOV_GVAL(IOV_HP2P_PKT_THRESHOLD):
6615 		int_val = dhd_prot_pkt_threshold(bus->dhd, FALSE, int_val);
6616 		bcopy(&int_val, arg, val_size);
6617 		break;
6618 
6619 	case IOV_SVAL(IOV_HP2P_TIME_THRESHOLD):
6620 		dhd_prot_time_threshold(bus->dhd, TRUE, int_val);
6621 		break;
6622 
6623 	case IOV_GVAL(IOV_HP2P_TIME_THRESHOLD):
6624 		int_val = dhd_prot_time_threshold(bus->dhd, FALSE, int_val);
6625 		bcopy(&int_val, arg, val_size);
6626 		break;
6627 
6628 	case IOV_SVAL(IOV_HP2P_PKT_EXPIRY):
6629 		dhd_prot_pkt_expiry(bus->dhd, TRUE, int_val);
6630 		break;
6631 
6632 	case IOV_GVAL(IOV_HP2P_PKT_EXPIRY):
6633 		int_val = dhd_prot_pkt_expiry(bus->dhd, FALSE, int_val);
6634 		bcopy(&int_val, arg, val_size);
6635 		break;
6636 	case IOV_SVAL(IOV_HP2P_TXCPL_MAXITEMS):
6637 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
6638 			return BCME_NOTDOWN;
6639 		}
6640 		dhd_bus_set_hp2p_ring_max_size(bus, TRUE, int_val);
6641 		break;
6642 
6643 	case IOV_GVAL(IOV_HP2P_TXCPL_MAXITEMS):
6644 		int_val = dhd_bus_get_hp2p_ring_max_size(bus, TRUE);
6645 		bcopy(&int_val, arg, val_size);
6646 		break;
6647 	case IOV_SVAL(IOV_HP2P_RXCPL_MAXITEMS):
6648 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
6649 			return BCME_NOTDOWN;
6650 		}
6651 		dhd_bus_set_hp2p_ring_max_size(bus, FALSE, int_val);
6652 		break;
6653 
6654 	case IOV_GVAL(IOV_HP2P_RXCPL_MAXITEMS):
6655 		int_val = dhd_bus_get_hp2p_ring_max_size(bus, FALSE);
6656 		bcopy(&int_val, arg, val_size);
6657 		break;
6658 #endif /* DHD_HP2P */
6659 	case IOV_SVAL(IOV_EXTDTXS_IN_TXCPL):
6660 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
6661 			return BCME_NOTDOWN;
6662 		}
6663 		if (int_val)
6664 			bus->dhd->extdtxs_in_txcpl = TRUE;
6665 		else
6666 			bus->dhd->extdtxs_in_txcpl = FALSE;
6667 		break;
6668 
6669 	case IOV_GVAL(IOV_EXTDTXS_IN_TXCPL):
6670 		int_val = bus->dhd->extdtxs_in_txcpl;
6671 		bcopy(&int_val, arg, val_size);
6672 		break;
6673 
6674 	case IOV_SVAL(IOV_HOSTRDY_AFTER_INIT):
6675 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
6676 			return BCME_NOTDOWN;
6677 		}
6678 		if (int_val)
6679 			bus->dhd->hostrdy_after_init = TRUE;
6680 		else
6681 			bus->dhd->hostrdy_after_init = FALSE;
6682 		break;
6683 
6684 	case IOV_GVAL(IOV_HOSTRDY_AFTER_INIT):
6685 		int_val = bus->dhd->hostrdy_after_init;
6686 		bcopy(&int_val, arg, val_size);
6687 		break;
6688 
6689 	default:
6690 		bcmerror = BCME_UNSUPPORTED;
6691 		break;
6692 	}
6693 
6694 exit:
6695 	return bcmerror;
6696 } /* dhdpcie_bus_doiovar */
6697 
6698 /** Transfers bytes from host to dongle using pio mode */
6699 static int
6700 dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len)
6701 {
6702 	if (bus->dhd == NULL) {
6703 		DHD_ERROR(("bus not inited\n"));
6704 		return 0;
6705 	}
6706 	if (bus->dhd->prot == NULL) {
6707 		DHD_ERROR(("prot is not inited\n"));
6708 		return 0;
6709 	}
6710 	if (bus->dhd->busstate != DHD_BUS_DATA) {
6711 		DHD_ERROR(("not in a ready state for LPBK\n"));
6712 		return 0;
6713 	}
6714 	dhdmsgbuf_lpbk_req(bus->dhd, len);
6715 	return 0;
6716 }
6717 
6718 void
6719 dhd_bus_dump_dar_registers(struct dhd_bus *bus)
6720 {
6721 	uint32 dar_clk_ctrl_val, dar_pwr_ctrl_val, dar_intstat_val,
6722 		dar_errlog_val, dar_erraddr_val, dar_pcie_mbint_val;
6723 	uint32 dar_clk_ctrl_reg, dar_pwr_ctrl_reg, dar_intstat_reg,
6724 		dar_errlog_reg, dar_erraddr_reg, dar_pcie_mbint_reg;
6725 
6726 	if (bus->is_linkdown && !bus->cto_triggered) {
6727 		DHD_ERROR(("%s: link is down\n", __FUNCTION__));
6728 		return;
6729 	}
6730 
6731 	dar_clk_ctrl_reg = (uint32)DAR_CLK_CTRL(bus->sih->buscorerev);
6732 	dar_pwr_ctrl_reg = (uint32)DAR_PCIE_PWR_CTRL(bus->sih->buscorerev);
6733 	dar_intstat_reg = (uint32)DAR_INTSTAT(bus->sih->buscorerev);
6734 	dar_errlog_reg = (uint32)DAR_ERRLOG(bus->sih->buscorerev);
6735 	dar_erraddr_reg = (uint32)DAR_ERRADDR(bus->sih->buscorerev);
6736 	dar_pcie_mbint_reg = (uint32)DAR_PCIMailBoxInt(bus->sih->buscorerev);
6737 
6738 	if (bus->sih->buscorerev < 24) {
6739 		DHD_ERROR(("%s: DAR not supported for corerev(%d) < 24\n",
6740 			__FUNCTION__, bus->sih->buscorerev));
6741 		return;
6742 	}
6743 
6744 	dar_clk_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_clk_ctrl_reg, 0, 0);
6745 	dar_pwr_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pwr_ctrl_reg, 0, 0);
6746 	dar_intstat_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_intstat_reg, 0, 0);
6747 	dar_errlog_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_errlog_reg, 0, 0);
6748 	dar_erraddr_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_erraddr_reg, 0, 0);
6749 	dar_pcie_mbint_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pcie_mbint_reg, 0, 0);
6750 
6751 	DHD_ERROR(("%s: dar_clk_ctrl(0x%x:0x%x) dar_pwr_ctrl(0x%x:0x%x) dar_intstat(0x%x:0x%x)\n",
6752 		__FUNCTION__, dar_clk_ctrl_reg, dar_clk_ctrl_val,
6753 		dar_pwr_ctrl_reg, dar_pwr_ctrl_val, dar_intstat_reg, dar_intstat_val));
6754 
6755 	DHD_ERROR(("%s: dar_errlog(0x%x:0x%x) dar_erraddr(0x%x:0x%x) dar_pcie_mbint(0x%x:0x%x)\n",
6756 		__FUNCTION__, dar_errlog_reg, dar_errlog_val,
6757 		dar_erraddr_reg, dar_erraddr_val, dar_pcie_mbint_reg, dar_pcie_mbint_val));
6758 }
6759 
6760 /* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */
6761 void
6762 dhd_bus_hostready(struct dhd_bus *bus)
6763 {
6764 	if (!bus->dhd->d2h_hostrdy_supported) {
6765 		return;
6766 	}
6767 
6768 	if (bus->is_linkdown) {
6769 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
6770 		return;
6771 	}
6772 
6773 	DHD_ERROR(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
6774 		dhd_pcie_config_read(bus->osh, PCI_CFG_CMD, sizeof(uint32))));
6775 
6776 	if (DAR_PWRREQ(bus)) {
6777 		dhd_bus_pcie_pwr_req(bus);
6778 	}
6779 
6780 	dhd_bus_dump_dar_registers(bus);
6781 
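	/* si_corereg() takes (sih, coreidx, regoff, mask, val), so the ~0 mask
	 * below writes the full 32-bit DB1 register. The 0x12345678 value is
	 * presumably just a recognizable pattern; it is the doorbell write
	 * itself that signals hostready to the dongle.
	 */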
6782 	si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), ~0, 0x12345678);
6783 	bus->hostready_count++;
6784 	DHD_ERROR(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
6785 }
6786 
6787 /* Clear INTSTATUS */
6788 void
6789 dhdpcie_bus_clear_intstatus(struct dhd_bus *bus)
6790 {
6791 	uint32 intstatus = 0;
6792 	if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
6793 		(bus->sih->buscorerev == 2)) {
6794 		intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
6795 		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
6796 	} else {
6797 		/* this is a PCIE core register..not a config register... */
6798 		intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
6799 		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
6800 			intstatus);
6801 	}
6802 }
6803 
6804 int
6805 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6806 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint)
6807 #else
6808 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
6809 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
6810 {
6811 	int timeleft;
6812 	int rc = 0;
6813 	unsigned long flags, flags_bus;
6814 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6815 	int d3_read_retry = 0;
6816 	uint32 d2h_mb_data = 0;
6817 	uint32 zero = 0;
6818 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
6819 
6820 	if (bus->dhd == NULL) {
6821 		DHD_ERROR(("bus not inited\n"));
6822 		return BCME_ERROR;
6823 	}
6824 	if (bus->dhd->prot == NULL) {
6825 		DHD_ERROR(("prot is not inited\n"));
6826 		return BCME_ERROR;
6827 	}
6828 
6829 	if (dhd_query_bus_erros(bus->dhd)) {
6830 		return BCME_ERROR;
6831 	}
6832 
6833 	DHD_GENERAL_LOCK(bus->dhd, flags);
6834 	if (!(bus->dhd->busstate == DHD_BUS_DATA || bus->dhd->busstate == DHD_BUS_SUSPEND)) {
6835 		DHD_ERROR(("not in a ready state\n"));
6836 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
6837 		return BCME_ERROR;
6838 	}
6839 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
6840 	if (bus->dhd->dongle_reset) {
6841 		DHD_ERROR(("Dongle is in reset state.\n"));
6842 		return -EIO;
6843 	}
6844 
6845 	/* Check whether we are already in the requested state.
6846 	 * state=TRUE means Suspend
6847 	 * state=FALSE means Resume
6848 	 */
6849 	if (state == TRUE && bus->dhd->busstate == DHD_BUS_SUSPEND) {
6850 		DHD_ERROR(("Bus is already in SUSPEND state.\n"));
6851 		return BCME_OK;
6852 	} else if (state == FALSE && bus->dhd->busstate == DHD_BUS_DATA) {
6853 		DHD_ERROR(("Bus is already in RESUME state.\n"));
6854 		return BCME_OK;
6855 	}
6856 
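	/* Sketch of the flow implemented below:
	 * suspend (state == TRUE): stop watchdog and net queues ->
	 *     send H2D_HOST_D3_INFORM -> wait for D3_ACK;
	 *     on ACK: optional D0_INFORM_IN_USE, disable host interrupts,
	 *     busstate = DHD_BUS_SUSPEND, dhdpcie_pci_suspend_resume();
	 *     on timeout: dump console/registers, restore DHD_BUS_DATA,
	 *     return -ETIMEDOUT.
	 * resume (state == FALSE): dhdpcie_pci_suspend_resume() ->
	 *     optional D0_INFORM -> hostready doorbell -> busstate =
	 *     DHD_BUS_DATA, re-enable interrupts and net queues.
	 */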
6857 	if (state) {
6858 #ifdef OEM_ANDROID
6859 		int idle_retry = 0;
6860 		int active;
6861 #endif /* OEM_ANDROID */
6862 
6863 		if (bus->is_linkdown) {
6864 			DHD_ERROR(("%s: PCIe link was down, state=%d\n",
6865 				__FUNCTION__, state));
6866 			return BCME_ERROR;
6867 		}
6868 
6869 		/* Suspend */
6870 		DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__));
6871 
6872 		bus->dhd->dhd_watchdog_ms_backup = dhd_watchdog_ms;
6873 		if (bus->dhd->dhd_watchdog_ms_backup) {
6874 			DHD_ERROR(("%s: Disabling wdtick before going to suspend\n",
6875 				__FUNCTION__));
6876 			dhd_os_wd_timer(bus->dhd, 0);
6877 		}
6878 
6879 		DHD_GENERAL_LOCK(bus->dhd, flags);
6880 		if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
6881 			DHD_ERROR(("Tx Request is not ended\n"));
6882 			bus->dhd->busstate = DHD_BUS_DATA;
6883 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
6884 			return -EBUSY;
6885 		}
6886 
6887 		bus->last_suspend_start_time = OSL_LOCALTIME_NS();
6888 
6889 		/* stop all interface network queue. */
6890 		dhd_bus_stop_queue(bus);
6891 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
6892 
6893 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6894 		if (byint) {
6895 			DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
6896 			/* Clear wait_for_d3_ack before sending D3_INFORM */
6897 			bus->wait_for_d3_ack = 0;
6898 			dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
6899 
6900 			timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
6901 			DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
6902 		} else {
6903 			/* Clear wait_for_d3_ack before sending D3_INFORM */
6904 			bus->wait_for_d3_ack = 0;
6905 			dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM | H2D_HOST_ACK_NOINT);
6906 			while (!bus->wait_for_d3_ack && d3_read_retry < MAX_D3_ACK_TIMEOUT) {
6907 				dhdpcie_handle_mb_data(bus);
6908 				usleep_range(1000, 1500);
6909 				d3_read_retry++;
6910 			}
6911 		}
6912 #else
6913 		DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
6914 		/* Clear wait_for_d3_ack before sending D3_INFORM */
6915 		bus->wait_for_d3_ack = 0;
6916 		/*
6917 		 * Send H2D_HOST_D3_INFORM to dongle and mark bus->bus_low_power_state
6918 		 * to DHD_BUS_D3_INFORM_SENT in dhd_prot_ring_write_complete_mbdata
6919 		 * inside atomic context, so that no more DBs will be
6920 		 * rung after sending D3_INFORM
6921 		 */
6922 		dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
6923 
6924 		/* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */
6925 
6926 		timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
6927 
6928 #ifdef DHD_RECOVER_TIMEOUT
6929 		if (bus->wait_for_d3_ack == 0) {
6930 			/* If wait_for_d3_ack was not updated because D2H MB was not received */
6931 			uint32 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
6932 				bus->pcie_mailbox_int, 0, 0);
6933 			int host_irq_disabled = dhdpcie_irq_disabled(bus);
6934 			if ((intstatus) && (intstatus != (uint32)-1) &&
6935 				(timeleft == 0) && (!dhd_query_bus_erros(bus->dhd))) {
6936 				DHD_ERROR(("%s: D3 ACK trying again intstatus=%x"
6937 					" host_irq_disabled=%d\n",
6938 					__FUNCTION__, intstatus, host_irq_disabled));
6939 				dhd_pcie_intr_count_dump(bus->dhd);
6940 				dhd_print_tasklet_status(bus->dhd);
6941 				if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 &&
6942 					!bus->use_mailbox) {
6943 					dhd_prot_process_ctrlbuf(bus->dhd);
6944 				} else {
6945 					dhdpcie_handle_mb_data(bus);
6946 				}
6947 				timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
6948 				/* Clear Interrupts */
6949 				dhdpcie_bus_clear_intstatus(bus);
6950 			}
6951 		} /* bus->wait_for_d3_ack was 0 */
6952 #endif /* DHD_RECOVER_TIMEOUT */
6953 
6954 		DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
6955 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
6956 
6957 #ifdef OEM_ANDROID
6958 		/* To allow threads that got pre-empted to complete.
6959 		 */
6960 		while ((active = dhd_os_check_wakelock_all(bus->dhd)) &&
6961 			(idle_retry < MAX_WKLK_IDLE_CHECK)) {
6962 			OSL_SLEEP(1);
6963 			idle_retry++;
6964 		}
6965 #endif /* OEM_ANDROID */
6966 
6967 		if (bus->wait_for_d3_ack) {
6968 			DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__));
6969 			/* Got D3 Ack. Suspend the bus */
6970 #ifdef OEM_ANDROID
6971 			if (active) {
6972 				DHD_ERROR(("%s(): Suspend failed because of wakelock, "
6973 					"restoring Dongle to D0\n", __FUNCTION__));
6974 
6975 				if (bus->dhd->dhd_watchdog_ms_backup) {
6976 					DHD_ERROR(("%s: Enabling wdtick due to wakelock active\n",
6977 						__FUNCTION__));
6978 					dhd_os_wd_timer(bus->dhd,
6979 						bus->dhd->dhd_watchdog_ms_backup);
6980 				}
6981 
6982 				/*
6983 				 * Dongle still thinks that it has to be in D3 state until
6984 				 * it gets a D0 Inform, but we are backing off from suspend.
6985 				 * Ensure that Dongle is brought back to D0.
6986 				 *
6987 				 * Bringing back Dongle from D3 Ack state to D0 state is a
6988 				 * 2 step process. The Dongle expects a D0 Inform sent as an
6989 				 * MB interrupt to bring it out of the D3 Ack state, followed
6990 				 * by the hostready doorbell, so both messages must be sent.
6991 				 */
6992 
6993 				/* Clear wait_for_d3_ack to send D0_INFORM or host_ready */
6994 				bus->wait_for_d3_ack = 0;
6995 
6996 				DHD_BUS_LOCK(bus->bus_lock, flags_bus);
6997 				bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
6998 				/* Enable back the intmask which was cleared in DPC
6999 				 * after getting D3_ACK.
7000 				 */
7001 				bus->resume_intr_enable_count++;
7002 
7003 				/* For Linux, MacOS etc. (other than NDIS) enable back the dongle
7004 				 * interrupts using intmask and host interrupts
7005 				 * which were disabled in the dhdpcie_bus_isr()->
7006 				 * dhd_bus_handle_d3_ack().
7007 				 */
7008 				/* Enable back interrupt using Intmask!! */
7009 				dhdpcie_bus_intr_enable(bus);
7010 				/* Enable back interrupt from Host side!! */
7011 				dhdpcie_enable_irq(bus);
7012 
7013 				DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
7014 
7015 				if (bus->use_d0_inform) {
7016 					DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
7017 					dhdpcie_send_mb_data(bus,
7018 						(H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM));
7019 					DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
7020 				}
7021 				/* ring doorbell 1 (hostready) */
7022 				dhd_bus_hostready(bus);
7023 
7024 				DHD_GENERAL_LOCK(bus->dhd, flags);
7025 				bus->dhd->busstate = DHD_BUS_DATA;
7026 				/* resume all interface network queue. */
7027 				dhd_bus_start_queue(bus);
7028 				DHD_GENERAL_UNLOCK(bus->dhd, flags);
7029 				rc = BCME_ERROR;
7030 			} else {
7031 				/* Actual Suspend after no wakelock */
7032 #endif /* OEM_ANDROID */
7033 				/* At this time bus->bus_low_power_state will be
7034 				 * made to DHD_BUS_D3_ACK_RECIEVED after receiving D3_ACK
7035 				 * in dhd_bus_handle_d3_ack()
7036 				 */
7037 				if (bus->use_d0_inform &&
7038 					(bus->api.fw_rev < PCIE_SHARED_VERSION_6)) {
7039 					DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
7040 					dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
7041 					DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
7042 				}
7043 
7044 #if defined(BCMPCIE_OOB_HOST_WAKE)
7045 				if (bus->dhd->dhd_induce_error == DHD_INDUCE_DROP_OOB_IRQ) {
7046 					DHD_ERROR(("%s: Inducing DROP OOB IRQ\n", __FUNCTION__));
7047 				} else {
7048 					dhdpcie_oob_intr_set(bus, TRUE);
7049 				}
7050 #endif /* BCMPCIE_OOB_HOST_WAKE */
7051 
7052 				DHD_GENERAL_LOCK(bus->dhd, flags);
7053 				/* The Host cannot process interrupts now, so disable them.
7054 				 * No need to disable the dongle INTR using intmask, as we are
7055 				 * already disabling INTRs from the DPC context after
7056 				 * getting D3_ACK in dhd_bus_handle_d3_ack.
7057 				 * Code may not look symmetric between Suspend and
7058 				 * Resume paths but this is done to close down the timing window
7059 				 * between DPC and suspend context and bus->bus_low_power_state
7060 				 * will be set to DHD_BUS_D3_ACK_RECIEVED in DPC.
7061 				 */
7062 				bus->dhd->d3ackcnt_timeout = 0;
7063 				bus->dhd->busstate = DHD_BUS_SUSPEND;
7064 				DHD_GENERAL_UNLOCK(bus->dhd, flags);
7065 				dhdpcie_dump_resource(bus);
7066 				/* Handle Host Suspend */
7067 				rc = dhdpcie_pci_suspend_resume(bus, state);
7068 				if (!rc) {
7069 					bus->last_suspend_end_time = OSL_LOCALTIME_NS();
7070 				}
7071 #ifdef OEM_ANDROID
7072 			}
7073 #endif /* OEM_ANDROID */
7074 		} else if (timeleft == 0) { /* D3 ACK Timeout */
7075 #ifdef DHD_FW_COREDUMP
7076 			uint32 cur_memdump_mode = bus->dhd->memdump_enabled;
7077 #endif /* DHD_FW_COREDUMP */
7078 
7079 			/* check if the D3 ACK timeout due to scheduling issue */
7080 			/* check if the D3 ACK timeout is due to a scheduling issue */
7081 				bus->isr_entry_time > bus->last_d3_inform_time &&
7082 				dhd_bus_query_dpc_sched_errors(bus->dhd);
7083 			bus->dhd->d3ack_timeout_occured = TRUE;
7084 			/* If the D3 Ack has timeout */
7085 			/* The D3 ACK has timed out */
7086 			DHD_ERROR(("%s: resumed on timeout for D3 ACK%s d3_inform_cnt %d\n",
7087 				__FUNCTION__, bus->dhd->is_sched_error ?
7088 				" due to scheduling problem" : "", bus->dhd->d3ackcnt_timeout));
7089 #if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
7090 			if (bus->dhd->is_sched_error && cur_memdump_mode == DUMP_MEMFILE_BUGON) {
7091 				/* change g_assert_type to trigger Kernel panic */
7092 				g_assert_type = 2;
7093 				/* use ASSERT() to trigger panic */
7094 				ASSERT(0);
7095 			}
7096 #endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
7097 			DHD_BUS_LOCK(bus->bus_lock, flags_bus);
7098 			bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
7099 			DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
7100 			DHD_GENERAL_LOCK(bus->dhd, flags);
7101 			bus->dhd->busstate = DHD_BUS_DATA;
7102 			/* resume all interface network queue. */
7103 			dhd_bus_start_queue(bus);
7104 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
7105 			if (!bus->dhd->dongle_trap_occured &&
7106 				!bus->is_linkdown &&
7107 				!bus->cto_triggered) {
7108 				uint32 intstatus = 0;
7109 
7110 				/* Check if PCIe bus status is valid */
7111 				intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
7112 					bus->pcie_mailbox_int, 0, 0);
7113 				if (intstatus == (uint32)-1) {
7114 					/* Invalidate PCIe bus status */
7115 					bus->is_linkdown = 1;
7116 				}
7117 
7118 				dhd_bus_dump_console_buffer(bus);
7119 				dhd_prot_debug_info_print(bus->dhd);
7120 #ifdef DHD_FW_COREDUMP
7121 				if (cur_memdump_mode) {
7122 					/* write core dump to file */
7123 					bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
7124 					dhdpcie_mem_dump(bus);
7125 				}
7126 #endif /* DHD_FW_COREDUMP */
7127 
7128 #ifdef OEM_ANDROID
7129 				DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n",
7130 					__FUNCTION__));
7131 #ifdef SUPPORT_LINKDOWN_RECOVERY
7132 #ifdef CONFIG_ARCH_MSM
7133 				bus->no_cfg_restore = 1;
7134 #endif /* CONFIG_ARCH_MSM */
7135 #endif /* SUPPORT_LINKDOWN_RECOVERY */
7136 				dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
7137 #endif /* OEM_ANDROID */
7138 			}
7139 #if defined(DHD_ERPOM)
7140 			dhd_schedule_reset(bus->dhd);
7141 #endif // endif
7142 			rc = -ETIMEDOUT;
7143 		}
7144 	} else {
7145 		/* Resume */
7146 		DHD_ERROR(("%s: Entering resume state\n", __FUNCTION__));
7147 		bus->last_resume_start_time = OSL_LOCALTIME_NS();
7148 
7149 		/**
7150 		 * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
7151 		 * si_backplane_access() (the function used to read/write the backplane)
7152 		 * updates the window (PCIE2_BAR0_CORE2_WIN) only if the
7153 		 * window being accessed is different from the window
7154 		 * pointed to by second_bar0win.
7155 		 * Since PCIE2_BAR0_CORE2_WIN is already reset because of D3 cold,
7156 		 * invalidating second_bar0win after resume updates
7157 		 * PCIE2_BAR0_CORE2_WIN with the right window.
7158 		 */
7159 		si_invalidate_second_bar0win(bus->sih);
7160 #if defined(OEM_ANDROID)
7161 #if defined(BCMPCIE_OOB_HOST_WAKE)
7162 		DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
7163 #endif /* BCMPCIE_OOB_HOST_WAKE */
7164 #endif /* OEM_ANDROID */
7165 		rc = dhdpcie_pci_suspend_resume(bus, state);
7166 		dhdpcie_dump_resource(bus);
7167 
7168 		DHD_BUS_LOCK(bus->bus_lock, flags_bus);
7169 		/* set bus_low_power_state to DHD_BUS_NO_LOW_POWER_STATE */
7170 		bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
7171 		DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
7172 
7173 		if (!rc && bus->dhd->busstate == DHD_BUS_SUSPEND) {
7174 			if (bus->use_d0_inform) {
7175 				DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
7176 				dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
7177 				DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
7178 			}
7179 			/* ring doorbell 1 (hostready) */
7180 			dhd_bus_hostready(bus);
7181 		}
7182 		DHD_GENERAL_LOCK(bus->dhd, flags);
7183 		bus->dhd->busstate = DHD_BUS_DATA;
7184 #ifdef DHD_PCIE_RUNTIMEPM
7185 		if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
7186 			bus->bus_wake = 1;
7187 			OSL_SMP_WMB();
7188 			wake_up_interruptible(&bus->rpm_queue);
7189 		}
7190 #endif /* DHD_PCIE_RUNTIMEPM */
7191 		/* resume all interface network queue. */
7192 		dhd_bus_start_queue(bus);
7193 
7194 		/* TODO: for NDIS also we need to use enable_irq in future */
7195 		bus->resume_intr_enable_count++;
7196 
7197 		/* For Linux, MacOS etc. (other than NDIS) enable back the dongle interrupts
7198 		 * using intmask and host interrupts
7199 		 * which were disabled in the dhdpcie_bus_isr()->dhd_bus_handle_d3_ack().
7200 		 */
7201 		dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
7202 		dhdpcie_enable_irq(bus); /* Enable back interrupt from Host side!! */
7203 
7204 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
7205 
7206 		if (bus->dhd->dhd_watchdog_ms_backup) {
7207 			DHD_ERROR(("%s: Enabling wdtick after resume\n",
7208 				__FUNCTION__));
7209 			dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
7210 		}
7211 
7212 		bus->last_resume_end_time = OSL_LOCALTIME_NS();
7213 		/* Update TCM rd index for EDL ring */
7214 		DHD_EDL_RING_TCM_RD_UPDATE(bus->dhd);
7215 	}
7216 	return rc;
7217 }
7218 
7219 uint32
7220 dhdpcie_force_alp(struct dhd_bus *bus, bool enable)
7221 {
7222 	ASSERT(bus && bus->sih);
7223 	if (enable) {
7224 		si_corereg(bus->sih, bus->sih->buscoreidx,
7225 			OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, CCS_FORCEALP);
7226 	} else {
7227 		si_corereg(bus->sih, bus->sih->buscoreidx,
7228 			OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, 0);
7229 	}
7230 	return 0;
7231 }
7232 
7233 /* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */
7234 uint32
7235 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int l1_entry_time)
7236 {
7237 	uint reg_val;
7238 
7239 	ASSERT(bus && bus->sih);
7240 
7241 	si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
7242 		0x1004);
7243 	reg_val = si_corereg(bus->sih, bus->sih->buscoreidx,
7244 		OFFSETOF(sbpcieregs_t, configdata), 0, 0);
7245 	reg_val = (reg_val & ~(0x7f << 16)) | ((l1_entry_time & 0x7f) << 16);
7246 	si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
7247 		reg_val);
7248 
7249 	return 0;
7250 }
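/* Worked example (illustrative values): with l1_entry_time = 0x20 and a prior
 * configdata value of 0x00550000, the read-modify-write above yields
 * (0x00550000 & ~(0x7f << 16)) | (0x20 << 16) = 0x00200000, i.e. only bits
 * [22:16] of PCIe config offset 0x1004 change.
 */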
7251 
7252 static uint32
7253 dhd_apply_d11_war_length(struct dhd_bus *bus, uint32 len, uint32 d11_lpbk)
7254 {
7255 	uint16 chipid = si_chipid(bus->sih);
7256 	if ((chipid == BCM4375_CHIP_ID ||
7257 		chipid == BCM4362_CHIP_ID ||
7258 		chipid == BCM43751_CHIP_ID ||
7259 		chipid == BCM4377_CHIP_ID) &&
7260 		(d11_lpbk != M2M_DMA_LPBK && d11_lpbk != M2M_NON_DMA_LPBK)) {
7261 			len += 8;
7262 	}
7263 	DHD_ERROR(("%s: len %d\n", __FUNCTION__, len));
7264 	return len;
7265 }
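/* Example (hypothetical values): on a BCM4375 with a d11_lpbk mode other than
 * M2M_DMA_LPBK/M2M_NON_DMA_LPBK, a requested len of 1024 becomes 1032; on
 * chips outside the list above, len is returned unchanged.
 */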
7266 
7267 /** Transfers bytes from host to dongle and to host again using DMA */
7268 static int
7269 dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
7270 		uint32 len, uint32 srcdelay, uint32 destdelay,
7271 		uint32 d11_lpbk, uint32 core_num, uint32 wait)
7272 {
7273 	int ret = 0;
7274 
7275 	if (bus->dhd == NULL) {
7276 		DHD_ERROR(("bus not inited\n"));
7277 		return BCME_ERROR;
7278 	}
7279 	if (bus->dhd->prot == NULL) {
7280 		DHD_ERROR(("prot is not inited\n"));
7281 		return BCME_ERROR;
7282 	}
7283 	if (bus->dhd->busstate != DHD_BUS_DATA) {
7284 		DHD_ERROR(("not in a readystate to LPBK  is not inited\n"));
7285 		DHD_ERROR(("not in a ready state for LPBK\n"));
7286 	}
7287 
7288 	if (len < 5 || len > 4194296) {
7289 		DHD_ERROR(("len %u out of range [5, 4194296]\n", len));
7290 		return BCME_ERROR;
7291 	}
7292 
7293 	len = dhd_apply_d11_war_length(bus, len, d11_lpbk);
7294 
7295 	bus->dmaxfer_complete = FALSE;
7296 	ret = dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay,
7297 		d11_lpbk, core_num);
7298 	if (ret != BCME_OK || !wait) {
7299 		DHD_INFO(("%s: dmaxfer req returns status %u; wait = %u\n", __FUNCTION__,
7300 				ret, wait));
7301 	} else {
7302 		ret = dhd_os_dmaxfer_wait(bus->dhd, &bus->dmaxfer_complete);
7303 		if (ret < 0)
7304 			ret = BCME_NOTREADY;
7305 	}
7306 
7307 	return ret;
7308 
7309 }
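/* Illustrative usage (hypothetical parameter values): start a 64 KB loopback
 * DMA on core 0 with no source/destination delays and block for the result:
 *
 *	ret = dhdpcie_bus_dmaxfer_req(bus, 65536, 0, 0, 0, 0, 1);
 *
 * With wait == 0 the call returns once the request is queued; completion is
 * signalled later through bus->dmaxfer_complete.
 */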
7310 
7311 bool
7312 dhd_bus_is_multibp_capable(struct dhd_bus *bus)
7313 {
7314 	return MULTIBP_CAP(bus->sih);
7315 }
7316 
7317 #define PCIE_REV_FOR_4378A0	66	/* dhd_bus_perform_flr_with_quiesce() causes problems */
7318 #define PCIE_REV_FOR_4378B0	68
7319 
7320 static int
7321 dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
7322 {
7323 	int bcmerror = 0;
7324 	volatile uint32 *cr4_regs;
7325 	bool do_flr;
7326 	hs_addrs_t bl_hs_addrs = {NULL, NULL};
7327 
7328 	if (!bus->sih) {
7329 		DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));
7330 		return BCME_ERROR;
7331 	}
7332 
7333 	if (bus->sih->chip == CYW55560_CHIP_ID) {
7334 		/* Host bootloader handshake TCM/REGS addresses init */
7335 		bcmerror = dhdpcie_dongle_host_get_handshake_address(bus->sih, bus->osh,
7336 			&bl_hs_addrs);
7337 		if (bcmerror) {
7338 			DHD_ERROR(("%s: REGS/TCM addresses not initialized\n", __FUNCTION__));
7339 			goto fail;
7340 		}
7341 	}
7342 
7343 	do_flr = ((bus->sih->buscorerev != PCIE_REV_FOR_4378A0) &&
7344 			(bus->sih->buscorerev != PCIE_REV_FOR_4378B0));
7345 
7346 	if (MULTIBP_ENAB(bus->sih) && !do_flr) {
7347 		dhd_bus_pcie_pwr_req(bus);
7348 	}
7349 
7350 	/* To enter download state, disable ARM and reset SOCRAM.
7351 	 * To exit download state, simply reset ARM (default is RAM boot).
7352 	 */
7353 	if (enter) {
7354 #ifndef BCMQT	/* for performance reasons, skip the FLR for QT */
7355 #endif /* !BCMQT */
7356 
7357 		/* Make sure BAR1 maps to backplane address 0 */
7358 		dhdpcie_setbar1win(bus, 0x00000000);
7359 		bus->alp_only = TRUE;
7360 
7361 		/* some chips (e.g. 43602) have two ARM cores; the CR4 receives the firmware. */
7362 		cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
7363 
7364 		if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
7365 		    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) &&
7366 		    !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
7367 			DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
7368 			bcmerror = BCME_ERROR;
7369 			goto fail;
7370 		}
7371 
7372 		if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
7373 			/* Halt ARM & remove reset */
7374 			si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
7375 			if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) {
7376 				DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__));
7377 				bcmerror = BCME_ERROR;
7378 				goto fail;
7379 			}
7380 			si_core_reset(bus->sih, 0, 0);
7381 			/* reset last 4 bytes of RAM address. to be used for shared area */
7382 			/* reset the last 4 bytes of RAM; they hold the shared area address */
7383 		} else if (cr4_regs == NULL) { /* no CR4 present on chip */
7384 			si_core_disable(bus->sih, 0);
7385 
7386 			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
7387 				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
7388 				bcmerror = BCME_ERROR;
7389 				goto fail;
7390 			}
7391 
7392 			si_core_reset(bus->sih, 0, 0);
7393 
7394 			/* Clear the top bit of memory */
7395 			if (bus->ramsize) {
7396 				uint32 zeros = 0;
7397 				if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4,
7398 				                     (uint8*)&zeros, 4) < 0) {
7399 					bcmerror = BCME_ERROR;
7400 					goto fail;
7401 				}
7402 			}
7403 		} else {
7404 			/* For CR4,
7405 			 * Halt ARM
7406 			 * Remove ARM reset
7407 			 * Read RAM base address [0x18_0000]
7408 			 * [next] Download firmware
7409 			 * [done at else] Populate the reset vector
7410 			 * [done at else] Remove ARM halt
7411 			*/
7412 
7413 			if (bus->sih->chip == CYW55560_CHIP_ID) {
7414 
7415 				/* Skip ARM halt and reset in case of 55560 */
7416 
7417 				/* Bootloader host pre handshake function */
7418 				if ((bcmerror = dhdpcie_dongle_host_pre_handshake(bus->sih,
7419 					bus->osh, &bl_hs_addrs))) {
7420 					DHD_ERROR(("%s: error %d dongle host pre handshake\n",
7421 						__FUNCTION__, bcmerror));
7422 					goto fail;
7423 				}
7424 				DHD_ERROR(("%s: dongle host pre handshake successful, dl FW\n",
7425 					__FUNCTION__));
7426 
7427 				/* Read PCIE shared structure here */
7428 				/* This is necessary for console buffer initialization */
7429 				if ((bcmerror = dhdpcie_readshared_console(bus)) < 0) {
7430 					DHD_ERROR(("%s: Shared region not initialized\n",
7431 						__FUNCTION__));
7432 				}
7433 
7434 				/* Console buffer read - First pass */
7435 				if ((bcmerror = dhdpcie_bus_readconsole(bus)) < 0) {
7436 					DHD_ERROR(("%s: First pass console buffer read failed\n",
7437 						__FUNCTION__));
7438 				}
7439 			} else {
7440 				/* Halt ARM & remove reset */
7441 				si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
7442 				if (BCM43602_CHIP(bus->sih->chip)) {
7443 					W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX,
7444 						5);
7445 					W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA,
7446 						0);
7447 					W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX,
7448 						7);
7449 					W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA,
7450 						0);
7451 				}
7452 				/* reset last 4 bytes of RAM address. to be used for shared area */
7453 				/* reset the last 4 bytes of RAM; they hold the shared area address */
7454 			}
7455 		}
7456 	} else {
7457 		if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
7458 			/* write vars */
7459 			if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
7460 				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
7461 				goto fail;
7462 			}
7463 			/* write random numbers to sysmem for the purpose of
7464 			 * randomizing heap address space.
7465 			 */
7466 			if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
7467 				DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
7468 					__FUNCTION__));
7469 				goto fail;
7470 			}
7471 			/* switch back to arm core again */
7472 			if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
7473 				DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__));
7474 				bcmerror = BCME_ERROR;
7475 				goto fail;
7476 			}
7477 			/* write address 0 with reset instruction */
7478 			bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
7479 				(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
7480 			/* now remove reset and halt and continue to run CA7 */
7481 		} else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
7482 			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
7483 				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
7484 				bcmerror = BCME_ERROR;
7485 				goto fail;
7486 			}
7487 
7488 			if (!si_iscoreup(bus->sih)) {
7489 				DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
7490 				bcmerror = BCME_ERROR;
7491 				goto fail;
7492 			}
7493 
7494 			/* Enable remap before ARM reset but after vars.
7495 			 * No backplane access in remap mode
7496 			 */
7497 			if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
7498 			    !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
7499 				DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
7500 				bcmerror = BCME_ERROR;
7501 				goto fail;
7502 			}
7503 
7504 			if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
7505 			    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
7506 				DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
7507 				bcmerror = BCME_ERROR;
7508 				goto fail;
7509 			}
7510 		} else {
7511 			if (BCM43602_CHIP(bus->sih->chip)) {
7512 				/* Firmware crashes on SOCSRAM access when core is in reset */
7513 				if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
7514 					DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
7515 						__FUNCTION__));
7516 					bcmerror = BCME_ERROR;
7517 					goto fail;
7518 				}
7519 				si_core_reset(bus->sih, 0, 0);
7520 				si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
7521 			}
7522 
7523 			if (bus->sih->chip == CYW55560_CHIP_ID) {
7524 				/* Console buffer read - Second pass */
7525 				if ((bcmerror = dhdpcie_bus_readconsole(bus)) < 0) {
7526 					DHD_ERROR(("%s: Second pass console buffer read failed\n",
7527 						__FUNCTION__));
7528 				}
7529 
7530 				/* FW and NVRAM download done notification to bootloader */
7531 				if ((bcmerror = dhdpcie_dongle_host_post_handshake(bus->sih,
7532 					bus->osh, &bl_hs_addrs))) {
7533 					DHD_ERROR(("%s: error %d dongle host post handshake\n",
7534 						__FUNCTION__, bcmerror));
7535 					goto fail;
7536 				}
7537 				DHD_ERROR(("%s: FW download successful\n", __FUNCTION__));
7538 
7539 				/*
7540 				 * Check signature validation function
7541 				 * D2H_VALDN_DONE bit will be set in the following cases:
7542 				 * 1. Open mode: when a signature is not sent
7543 				 * 2. Secure mode: when a valid signature is sent
7544 				 * Write vars and nvram download only if the D2H_VALDN_DONE
7545 				 * bit has been set
7546 				 */
7547 
7548 				if ((bcmerror = dhdpcie_dongle_host_chk_validation(bus->sih,
7549 					bus->osh, &bl_hs_addrs))) {
7550 					DHD_ERROR(("%s: error %d dongle host validation\n",
7551 						__FUNCTION__, bcmerror));
7552 					goto fail;
7553 				}
7554 			}
7555 
7556 			/* write vars */
7557 			if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
7558 				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
7559 				goto fail;
7560 			}
7561 
7562 			/* write a random number to TCM for the purpose of
7563 			 * randomizing heap address space.
7564 			 */
7565 			if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
7566 				DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
7567 					__FUNCTION__));
7568 				goto fail;
7569 			}
7570 
7571 			/* switch back to arm core again */
7572 			if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
7573 				DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
7574 				bcmerror = BCME_ERROR;
7575 				goto fail;
7576 			}
7577 
7578 			/* write address 0 with reset instruction */
7579 			if (bus->sih->chip != CYW55560_CHIP_ID) {
7580 				bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
7581 					(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
7582 
7583 				if (bcmerror == BCME_OK) {
7584 					uint32 tmp;
7585 
7586 					bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
7587 						(uint8 *)&tmp, sizeof(tmp));
7588 
7589 					if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
7590 						DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
7591 							__FUNCTION__, bus->resetinstr));
7592 						DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
7593 							__FUNCTION__, tmp));
7594 						bcmerror = BCME_ERROR;
7595 						goto fail;
7596 					}
7597 				}
7598 			}
7599 
7600 			/* now remove reset and halt and continue to run CR4 */
7601 		}
7602 
7603 		if (bus->sih->chip == CYW55560_CHIP_ID) {
7604 			/* Console buffer read - Final pass */
7605 			if ((bcmerror = dhdpcie_bus_readconsole(bus)) < 0) {
7606 				DHD_ERROR(("%s: Final pass console buffer read failed\n",
7607 					__FUNCTION__));
7608 			}
7609 
7610 			/* Set write_vars done bit to let BL jump to mainline FW */
7611 			if ((bcmerror = dhdpcie_dongle_host_post_varswrite(bus, &bl_hs_addrs))) {
7612 				DHD_ERROR(("%s: error %d dongle_host_post_varswrite\n",
7613 					__FUNCTION__, bcmerror));
7614 				goto fail;
7615 			}
7616 			DHD_ERROR(("%s VARS done bit set, BL can jump to mainline FW\n",
7617 				__FUNCTION__));
7618 		} else {
7619 			si_core_reset(bus->sih, 0, 0);
7620 		}
7621 		/* Allow HT Clock now that the ARM is running. */
7622 		bus->alp_only = FALSE;
7623 
7624 		bus->dhd->busstate = DHD_BUS_LOAD;
7625 	}
7626 
7627 fail:
7628 
7629 	if (bcmerror) {
7630 		if (bus->sih->chip == CYW55560_CHIP_ID) {
7631 			/* Read the shared structure to determine console address */
7632 			if (dhdpcie_readshared_console(bus) < 0) {
7633 				DHD_ERROR(("%s: Shared region not initialized\n",
7634 					__FUNCTION__));
7635 			} else {
7636 				/* Console buffer read */
7637 				if (dhdpcie_bus_readconsole(bus) < 0) {
7638 					DHD_ERROR(("%s: Failure case console buffer read failed\n",
7639 						__FUNCTION__));
7640 				}
7641 			}
7642 		}
7643 	}
7644 
7645 	/* Always return to PCIE core */
7646 	si_setcore(bus->sih, PCIE2_CORE_ID, 0);
7647 
7648 	if (MULTIBP_ENAB(bus->sih) && !do_flr) {
7649 		dhd_bus_pcie_pwr_req_clear(bus);
7650 	}
7651 
7652 	return bcmerror;
7653 } /* dhdpcie_bus_download_state */
7654 
7655 static int
7656 dhdpcie_dongle_host_get_handshake_address(si_t *sih, osl_t *osh, hs_addrs_t *addr)
7657 {
7658 	int bcmerror = BCME_OK;
7659 
7660 #ifndef HS_IN_TCM
7661 	sbpcieregs_t *pcieregs;
7662 
7663 	pcieregs = si_setcore(sih, PCIE2_CORE_ID, 0);
7664 	if (!pcieregs) {
7665 		return BCME_ERROR;
7666 	}
7667 	addr->d2h = &pcieregs->u1.dar_64.d2h_msg_reg0;
7668 	addr->h2d = &pcieregs->u1.dar_64.h2d_msg_reg0;
7669 #else /* HS_IN_TCM */
7670 	addr->d2h = (void *)HS_IN_TCM;
7671 	addr->h2d = (volatile uint32 *)addr->d2h + 1;
7672 #endif /* HS_IN_TCM */
7673 
7674 	return bcmerror;
7675 } /* dhdpcie_dongle_host_get_handshake_address */
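/* CYW55560 bootloader handshake sequence, as implemented by the helpers below
 * (D2H bits are read from addr->d2h, H2D bits are written to addr->h2d):
 *
 *   1. clear h2d_msg_reg0, spin-wait for D2H_READY          (pre_handshake)
 *   2. set H2D_DL_START, then download the firmware image   (pre_handshake)
 *   3. clear DL_START, set H2D_DL_DONE, wait for
 *      D2H_TRX_HDR_PARSE_DONE                               (post_handshake)
 *   4. wait for D2H_VALDN_DONE, check D2H_VALDN_RESULT      (chk_validation)
 *   5. write vars, set H2D_DL_NVRAM_DONE so the bootloader
 *      jumps to the mainline firmware                       (post_varswrite)
 *
 * On failure the host sets H2D_BL_RESET_ON_ERROR so the bootloader resets.
 */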
7676 
7677 static int
7678 dhdpcie_handshake_msg_reg_write(si_t *sih, osl_t *osh, volatile void *addr, uint *buffer)
7679 {
7680 	int bcmerror = BCME_OK;
7681 
7682 #ifndef HS_IN_TCM
7683 	si_setcore(sih, PCIE2_CORE_ID, 0);
7684 	W_REG(osh, (volatile uint32 *)addr, *buffer);
7685 #else
7686 	bcmerror = si_backplane_access(sih, addr, 4, buffer, FALSE);
7687 #endif // endif
7688 	return bcmerror;
7689 } /* dhdpcie_handshake_msg_reg_write */
7690 
7691 static int
7692 dhdpcie_handshake_msg_reg_read(si_t *sih, osl_t *osh, volatile void *addr, uint *buffer)
7693 {
7694 	int bcmerror = BCME_OK;
7695 
7696 #ifndef HS_IN_TCM
7697 	si_setcore(sih, PCIE2_CORE_ID, 0);
7698 	*buffer = R_REG(osh, (volatile uint32 *)addr);
7699 #else
7700 	bcmerror = si_backplane_access(sih, addr, 4, buffer, TRUE);
7701 #endif // endif
7702 	return bcmerror;
7703 } /* dhdpcie_handshake_msg_reg_read */
7704 
7705 static int
7706 dhdpcie_dongle_host_handshake_spinwait(si_t *sih, osl_t *osh, volatile void *addr, uint32 bitshift,
7707 	uint32 us)
7708 {
7709 	uint32 countdown_;
7710 	uint32 read_addr = 0;
7711 	int bcmerror = BCME_OK;
7712 
7713 	for (countdown_ = (us) + (HS_POLL_PERIOD_US - 1U); countdown_ >= HS_POLL_PERIOD_US;
7714 		countdown_ -= HS_POLL_PERIOD_US) {
7715 
7716 		bcmerror = dhdpcie_handshake_msg_reg_read(sih, osh, addr, &read_addr);
7717 		if (bcmerror) {
7718 			bcmerror = BCME_ERROR;
7719 			break;
7720 		}
7721 
7722 		if (isset(&read_addr, bitshift)) {
7723 			bcmerror = BCME_OK;
7724 			break;
7725 		}
7726 
7727 		OSL_DELAY(HS_POLL_PERIOD_US);
7728 	}
7729 
7730 	if (countdown_ < HS_POLL_PERIOD_US) {
7731 		bcmerror = BCME_NOTREADY;
7732 	}
7733 
7734 	return bcmerror;
7735 } /* dhdpcie_dongle_host_handshake_spinwait */
7736 
7737 static int
7738 dhdpcie_dongle_host_pre_handshake(si_t *sih, osl_t *osh, hs_addrs_t *addr)
7739 {
7740 	int bcmerror = BCME_OK;
7741 	uint h2d_reg = 0x00000000;
7742 
7743 	/* Host initialization for dongle to host handshake */
7744 	bcmerror = dhdpcie_handshake_msg_reg_write(sih, osh, addr->h2d, &h2d_reg);
7745 	if (bcmerror) {
7746 		goto err;
7747 	}
7748 
7749 	bcmerror = dhdpcie_dongle_host_handshake_spinwait(sih, osh, addr->d2h, D2H_READY_SHIFT,
7750 		D2H_READY_TIMEOUT_US);
7751 	if (!bcmerror) {
7752 
7753 		/* Set H2D_DL_START indication to dongle that Host shall start FW download */
7754 		h2d_reg = 0;
7755 		setbit(&h2d_reg, H2D_DL_START_SHIFT);
7756 		bcmerror = dhdpcie_handshake_msg_reg_write(sih, osh, addr->h2d, &h2d_reg);
7757 		if (bcmerror) {
7758 			goto err;
7759 		}
7760 	}
7761 
7762 err:
7763 	return bcmerror;
7764 } /* dhdpcie_dongle_host_pre_handshake */
7765 
7766 static int
7767 dhdpcie_dongle_host_post_handshake(si_t *sih, osl_t *osh, hs_addrs_t *addr)
7768 {
7769 	int bcmerror = BCME_OK;
7770 	uint h2d_reg = 0x00000000;
7771 
7772 	/* Reset download start */
7773 	clrbit(&h2d_reg, H2D_DL_START_SHIFT);
7774 
7775 	/* download done */
7776 	setbit(&h2d_reg, H2D_DL_DONE_SHIFT);
7777 	bcmerror = dhdpcie_handshake_msg_reg_write(sih, osh, addr->h2d, &h2d_reg);
7778 	if (bcmerror) {
7779 		goto err;
7780 	}
7781 
7782 	bcmerror = dhdpcie_dongle_host_handshake_spinwait(sih, osh, addr->d2h,
7783 		D2H_TRX_HDR_PARSE_DONE_SHIFT, D2H_TRX_HDR_PARSE_DONE_TIMEOUT_US);
7784 
7785 	if (bcmerror) {
7786 		/* Host notification to bootloader to get reset on error */
7787 		dhdpcie_handshake_msg_reg_read(sih, osh, addr->h2d, &h2d_reg);
7788 		setbit(&h2d_reg, H2D_BL_RESET_ON_ERROR_SHIFT);
7789 		dhdpcie_handshake_msg_reg_write(sih, osh, addr->h2d, &h2d_reg);
7790 	}
7791 
7792 err:
7793 	return bcmerror;
7794 } /* dhdpcie_dongle_host_post_handshake */
7795 
7796 static int
7797 dhdpcie_dongle_host_chk_validation(si_t *sih, osl_t *osh, hs_addrs_t *addr)
7798 {
7799 	int bcmerror = BCME_OK;
7800 	uint d2h_reg = 0x00000000;
7801 	uint h2d_reg = 0x00000000;
7802 
7803 	bcmerror = dhdpcie_dongle_host_handshake_spinwait(sih, osh, addr->d2h, D2H_VALDN_DONE_SHIFT,
7804 		D2H_VALDN_DONE_TIMEOUT_US);
7805 	if (!bcmerror) {
7806 
7807 		bcmerror = dhdpcie_handshake_msg_reg_read(sih, osh, addr->d2h, &d2h_reg);
7808 		if (!bcmerror) {
7809 
7810 			if (isset(&d2h_reg, D2H_VALDN_RESULT_SHIFT)) {
7811 				DHD_ERROR(("%s: TRX img validation check successful\n",
7812 				__FUNCTION__));
7813 			} else {
7814 				DHD_ERROR(("%s: TRX img validation check failed\n", __FUNCTION__));
7815 				bcmerror = BCME_ERROR;
7816 			}
7817 		}
7818 	}
7819 
7820 	if (bcmerror) {
7821 		/* Host notification to bootloader to get reset on error
7822 		 * to avoid the race condition between host and dongle
7823 		 */
7824 		dhdpcie_handshake_msg_reg_read(sih, osh, addr->h2d, &h2d_reg);
7825 		setbit(&h2d_reg, H2D_BL_RESET_ON_ERROR_SHIFT);
7826 		dhdpcie_handshake_msg_reg_write(sih, osh, addr->h2d, &h2d_reg);
7827 	}
7828 
7829 	return bcmerror;
7830 } /* dhdpcie_dongle_host_chk_validation */
7831 
7832 int
7833 dhdpcie_dongle_host_pre_wd_reset_sequence(si_t *sih, osl_t *osh)
7834 {
7835 	int32 bcmerror = BCME_ERROR;
7836 	sbpcieregs_t *pcieregs = NULL;
7837 	uint32 reg_val = 0;
7838 
7839 	if (sih && osh) {
7840 
7841 		pcieregs = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0);
7842 
7843 		/* Host initialization for dongle to host handshake */
7844 		bcmerror = dhdpcie_handshake_msg_reg_write(sih, osh,
7845 			&pcieregs->u1.dar_64.h2d_msg_reg0, &reg_val);
7846 	}
7847 
7848 	return bcmerror;
7849 } /* dhdpcie_dongle_host_pre_wd_reset_sequence */
7850 
7851 int
7852 dhdpcie_dongle_host_post_wd_reset_sequence(si_t *sih, osl_t *osh)
7853 {
7854 	int32 bcmerror = BCME_ERROR;
7855 	sbpcieregs_t *pcieregs = NULL;
7856 	uint32 reg_val = 0;
7857 	int32 idx = 0;
7858 	int print_interval = D2H_READY_WD_RESET_COUNT / 10;
7859 
7860 	if (sih && osh) {
7861 		pcieregs = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0);
7862 
7863 		/* Host initialization for dongle to host handshake */
7864 		bcmerror = dhdpcie_handshake_msg_reg_write(sih, osh,
7865 			&pcieregs->u1.dar_64.h2d_msg_reg0, &reg_val);
7866 
7867 		for (idx = D2H_READY_WD_RESET_COUNT; idx > 0; idx--) {
7868 
7869 #ifdef BCMQT
7870 			OSL_SLEEP(D2H_READY_WD_RESET_US/1000);
7871 #else
7872 			OSL_DELAY(D2H_READY_WD_RESET_US);
7873 #endif // endif
7874 			if (!(idx % print_interval)) {
7875 				DHD_ERROR(("Waiting %d us for D2H_READY\n",
7876 					idx * D2H_READY_WD_RESET_US));
7877 			}
7878 
7879 			dhdpcie_handshake_msg_reg_read(sih, osh, &pcieregs->u1.dar_64.d2h_msg_reg0,
7880 				&reg_val);
7881 			if (isset(&reg_val, D2H_READY_SHIFT)) {
7882 				break;
7883 			}
7884 		}
7885 
7886 		if (!idx) {
7887 			DHD_ERROR(("%s: error - timed out waiting for D2H_READY\n",
7888 				__FUNCTION__));
7889 		} else {
7890 			bcmerror = BCME_OK;
7891 		}
7892 	}
7893 
7894 	return bcmerror;
7895 } /* dhdpcie_dongle_host_post_wd_reset_sequence */
7896 
7897 /* Pre-ChipID access sequence: make sure the H2D handshake register is cleared
7898  * and the host has waited for the bootloader to be ready before ChipID access.
7899  */
7900 int
7901 dhdpcie_dongle_host_pre_chipid_access_sequence(osl_t *osh, volatile void *regva)
7902 {
7903 	int32 bcmerror = BCME_ERROR;
7904 	sbpcieregs_t *pcieregs = NULL;
7905 	uint32 reg_val = 0;
7906 	int32 idx = 0;
7907 	int print_interval = D2H_READY_WD_RESET_COUNT / 10;
7908 
7909 	if (osh && regva) {
7910 
7911 		pcieregs = (sbpcieregs_t*)(regva);
7912 
7913 		/* Host init for D2H handshake */
7914 		W_REG(osh, &pcieregs->u1.dar_64.h2d_msg_reg0, reg_val);
7915 
7916 		/* Host waits for bootloader to be ready before ChipId access */
7917 		for (idx = D2H_READY_WD_RESET_COUNT; idx > 0; idx--) {
7918 
7919 #ifdef BCMQT
7920 			OSL_SLEEP(D2H_READY_WD_RESET_US/1000);
7921 #else
7922 			OSL_DELAY(D2H_READY_WD_RESET_US);
7923 #endif // endif
7924 			if (!(idx % print_interval)) {
7925 				DHD_ERROR(("Waiting %d us for D2H_READY\n",
7926 					idx * D2H_READY_WD_RESET_US));
7927 			}
7928 			reg_val = R_REG(osh, &pcieregs->u1.dar_64.d2h_msg_reg0);
7929 			if (isset(&reg_val, D2H_READY_SHIFT)) {
7930 				break;
7931 			}
7932 		}
7933 
7934 		if (!idx) {
7935 			DHD_ERROR(("%s: error - timed out waiting for D2H_READY\n",
7936 				__FUNCTION__));
7937 		} else {
7938 			bcmerror = BCME_OK;
7939 		}
7940 	}
7941 
7942 	return bcmerror;
7943 } /* dhdpcie_dongle_host_pre_chipid_access_sequence */
7944 
7945 static int
7946 dhdpcie_dongle_host_post_varswrite(dhd_bus_t *bus, hs_addrs_t *addr)
7947 {
7948 	int bcmerror = BCME_OK;
7949 	uint h2d_reg = 0x00000000;
7950 
7951 	/* Set NVRAM done bit (Download done is already set) */
7952 	setbit(&h2d_reg, H2D_DL_DONE_SHIFT);
7953 	setbit(&h2d_reg, H2D_DL_NVRAM_DONE_SHIFT);
7954 	bcmerror = dhdpcie_handshake_msg_reg_write(bus->sih, bus->osh, addr->h2d, &h2d_reg);
7955 
7956 	return bcmerror;
7957 } /* dhdpcie_dongle_host_post_varswrite */
7958 
7959 static int
7960 dhdpcie_bus_write_vars(dhd_bus_t *bus)
7961 {
7962 	int bcmerror = 0;
7963 	uint32 varsize, phys_size;
7964 	uint32 varaddr;
7965 	uint8 *vbuffer;
7966 	uint32 varsizew;
7967 #ifdef DHD_DEBUG
7968 	uint8 *nvram_ularray;
7969 #endif /* DHD_DEBUG */
7970 
7971 	/* Even if there are no vars to be written, we still need to set the ramsize. */
7972 	varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
7973 	varaddr = (bus->ramsize - 4) - varsize;
7974 
7975 	varaddr += bus->dongle_ram_base;
7976 
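	/* RAM layout implied by the arithmetic above (top of dongle RAM,
	 * taking phys_size == dongle_ram_base + ramsize):
	 *
	 *   varaddr = ram_base + ramsize - 4 - varsize  -> vars (varsize bytes)
	 *   ram_base + ramsize - 4                      -> 4-byte length token
	 */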
7977 	if (bus->vars) {
7978 
7979 		vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
7980 		if (!vbuffer)
7981 			return BCME_NOMEM;
7982 
7983 		bzero(vbuffer, varsize);
7984 		bcopy(bus->vars, vbuffer, bus->varsz);
7985 		/* Write the vars list */
7986 		bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);
7987 
7988 		/* Implement read back and verify later */
7989 #ifdef DHD_DEBUG
7990 		/* Verify NVRAM bytes */
7991 		DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
7992 		nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
7993 		if (!nvram_ularray) {
7994 			MFREE(bus->dhd->osh, vbuffer, varsize);
7995 			return BCME_NOMEM;
7996 		}
7997 
7998 		/* Upload image to verify downloaded contents. */
7999 		memset(nvram_ularray, 0xaa, varsize);
8000 
8001 		/* Read the vars list to temp buffer for comparison */
8002 		bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
8003 		if (bcmerror) {
8004 			DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
8005 				__FUNCTION__, bcmerror, varsize, varaddr));
8006 		}
8007 
8008 		/* Compare the org NVRAM with the one read from RAM */
8009 		if (memcmp(vbuffer, nvram_ularray, varsize)) {
8010 			DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
8011 		} else
8012 			DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
8013 			__FUNCTION__));
8014 
8015 		MFREE(bus->dhd->osh, nvram_ularray, varsize);
8016 #endif /* DHD_DEBUG */
8017 
8018 		MFREE(bus->dhd->osh, vbuffer, varsize);
8019 	}
8020 
8021 	phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
8022 
8023 	phys_size += bus->dongle_ram_base;
8024 
8025 	/* adjust to the user specified RAM */
8026 	DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
8027 		phys_size, bus->ramsize));
8028 	DHD_INFO(("Vars are at %d, orig varsize is %d\n",
8029 		varaddr, varsize));
8030 	varsize = ((phys_size - 4) - varaddr);
8031 
8032 	/*
8033 	 * Determine the length token:
8034 	 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
8035 	 */
8036 	if (bcmerror) {
8037 		varsizew = 0;
8038 		bus->nvram_csm = varsizew;
8039 	} else {
8040 		varsizew = varsize / 4;
8041 		varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
8042 		bus->nvram_csm = varsizew;
8043 		varsizew = htol32(varsizew);
8044 	}
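	/* Worked example (illustrative): for varsize = 0x1000 bytes,
	 * varsizew = 0x400 words and the token becomes
	 * (~0x400 << 16) | 0x400 = 0xfbff0400; the upper halfword is the
	 * ones-complement of the lower, presumably so the dongle can
	 * sanity-check it.
	 */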
8045 
8046 	DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
8047 
8048 	/* Write the length token to the last word */
8049 	bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
8050 		(uint8*)&varsizew, 4);
8051 
8052 	return bcmerror;
8053 } /* dhdpcie_bus_write_vars */
8054 
8055 int
8056 dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
8057 {
8058 	int bcmerror = BCME_OK;
8059 
8060 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
8061 
8062 	/* Basic sanity checks */
8063 	if (bus->dhd->up) {
8064 		bcmerror = BCME_NOTDOWN;
8065 		goto err;
8066 	}
8067 	if (!len) {
8068 		bcmerror = BCME_BUFTOOSHORT;
8069 		goto err;
8070 	}
8071 
8072 	/* Free the old ones and replace with passed variables */
8073 	if (bus->vars)
8074 		MFREE(bus->dhd->osh, bus->vars, bus->varsz);
8075 
8076 	bus->vars = MALLOC(bus->dhd->osh, len);
8077 	bus->varsz = bus->vars ? len : 0;
8078 	if (bus->vars == NULL) {
8079 		bcmerror = BCME_NOMEM;
8080 		goto err;
8081 	}
8082 
8083 	/* Copy the passed variables, which should include the terminating double-null */
8084 	bcopy(arg, bus->vars, bus->varsz);
8085 
8086 #ifdef DHD_USE_SINGLE_NVRAM_FILE
8087 	if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
8088 		char *sp = NULL;
8089 		char *ep = NULL;
8090 		int i;
8091 		char tag[2][8] = {"ccode=", "regrev="};
8092 
8093 		/* Find ccode and regrev info */
8094 		for (i = 0; i < 2; i++) {
8095 			sp = strnstr(bus->vars, tag[i], bus->varsz);
8096 			if (!sp) {
8097 				DHD_ERROR(("%s: Could not find %s info in the nvram %s\n",
8098 					__FUNCTION__, tag[i], bus->nv_path));
8099 				bcmerror = BCME_ERROR;
8100 				goto err;
8101 			}
8102 			sp = strchr(sp, '=');
8103 			ep = strchr(sp, '\0');
8104 			/* We assume that the string length of both the ccode and
8105 			 * regrev values does not exceed WLC_CNTRY_BUF_SZ
8106 			 */
8107 			if (ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) {
8108 				sp++;
8109 				while (*sp != '\0') {
8110 					DHD_INFO(("%s: parse '%s', current sp = '%c'\n",
8111 						__FUNCTION__, tag[i], *sp));
8112 					*sp++ = '0';
8113 				}
8114 			} else {
8115 				DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n",
8116 					__FUNCTION__, tag[i]));
8117 				bcmerror = BCME_ERROR;
8118 				goto err;
8119 			}
8120 		}
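		/* Example of the zeroing above (hypothetical nvram contents): an
		 * entry such as "ccode=US" is rewritten in place to "ccode=00",
		 * since every character of the value is overwritten with '0';
		 * "regrev=" entries are handled the same way.
		 */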
8121 	}
8122 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
8123 
8124 err:
8125 	return bcmerror;
8126 }
8127 
8128 /* loop through the capability list and see if the pcie capability exists */
8129 uint8
8130 dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id)
8131 {
8132 	uint8 cap_id;
8133 	uint8 cap_ptr = 0;
8134 	uint8 byte_val;
8135 
8136 	/* check for Header type 0 */
8137 	byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
8138 	if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) {
8139 		DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__));
8140 		goto end;
8141 	}
8142 
8143 	/* check if the capability pointer field exists */
8144 	byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
8145 	if (!(byte_val & PCI_CAPPTR_PRESENT)) {
8146 		DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__));
8147 		goto end;
8148 	}
8149 
8150 	cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
8151 	/* check if the capability pointer is 0x00 */
8152 	if (cap_ptr == 0x00) {
8153 		DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__));
8154 		goto end;
8155 	}
8156 
8157 	/* loop through the capability list and see if the pcie capability exists */
8158 
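	/* Config-space layout assumed by this walk (per the PCI spec): each
	 * capability entry starts with a byte pair, [cap_ptr + 0] holding the
	 * capability ID and [cap_ptr + 1] the offset of the next capability,
	 * with 0x00 terminating the chain.
	 */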
8159 	cap_id = read_pci_cfg_byte(cap_ptr);
8160 
8161 	while (cap_id != req_cap_id) {
8162 		cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
8163 		if (cap_ptr == 0x00) break;
8164 		cap_id = read_pci_cfg_byte(cap_ptr);
8165 	}
8166 
8167 end:
8168 	return cap_ptr;
8169 }
8170 
8171 void
8172 dhdpcie_pme_active(osl_t *osh, bool enable)
8173 {
8174 	uint8 cap_ptr;
8175 	uint32 pme_csr;
8176 
8177 	cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
8178 
8179 	if (!cap_ptr) {
8180 		DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
8181 		return;
8182 	}
8183 
8184 	pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32));
8185 	DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr));
8186 
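	/* PME_Status is a write-one-to-clear bit of the PM control/status
	 * register, so OR-ing PME_CSR_PME_STAT into the value written back
	 * below clears any pending PME event while PME_EN is updated.
	 */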
8187 	pme_csr |= PME_CSR_PME_STAT;
8188 	if (enable) {
8189 		pme_csr |= PME_CSR_PME_EN;
8190 	} else {
8191 		pme_csr &= ~PME_CSR_PME_EN;
8192 	}
8193 
8194 	OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr);
8195 }
8196 
8197 bool
8198 dhdpcie_pme_cap(osl_t *osh)
8199 {
8200 	uint8 cap_ptr;
8201 	uint32 pme_cap;
8202 
8203 	cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
8204 
8205 	if (!cap_ptr) {
8206 		DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
8207 		return FALSE;
8208 	}
8209 
8210 	pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32));
8211 
8212 	DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap));
8213 
8214 	return ((pme_cap & PME_CAP_PM_STATES) != 0);
8215 }
8216 
8217 uint32
8218 dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val)
8219 {
8220 
8221 	uint8	pcie_cap;
8222 	uint8	lcreg_offset;	/* PCIE capability LCreg offset in the config space */
8223 	uint32	reg_val;
8224 
8225 	pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
8226 
8227 	if (!pcie_cap) {
8228 		DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
8229 		return 0;
8230 	}
8231 
8232 	lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
8233 
8234 	/* set operation */
8235 	if (mask) {
8236 		/* read */
8237 		reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
8238 
8239 		/* modify */
8240 		reg_val &= ~mask;
8241 		reg_val |= (mask & val);
8242 
8243 		/* write */
8244 		OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
8245 	}
8246 	return OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
8247 }
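/* Illustrative usage (hypothetical mask/val): bits [1:0] of the PCIe Link
 * Control register form the ASPM control field, so
 *
 *	dhdpcie_lcreg(osh, 0x3, 0x0);
 *
 * would disable ASPM L0s/L1, while dhdpcie_lcreg(osh, 0, 0) is a pure read.
 */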
8248 
8249 uint8
8250 dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val)
8251 {
8252 	uint8	pcie_cap;
8253 	uint32	reg_val;
8254 	uint8	lcreg_offset;	/* PCIE capability LCreg offset in the config space */
8255 
8256 	pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
8257 
8258 	if (!pcie_cap) {
8259 		DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
8260 		return 0;
8261 	}
8262 
8263 	lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
8264 
8265 	reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
8266 	/* set operation */
8267 	if (mask) {
8268 		if (val)
8269 			reg_val |= PCIE_CLKREQ_ENAB;
8270 		else
8271 			reg_val &= ~PCIE_CLKREQ_ENAB;
8272 		OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
8273 		reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
8274 	}
8275 	if (reg_val & PCIE_CLKREQ_ENAB)
8276 		return 1;
8277 	else
8278 		return 0;
8279 }
8280 
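/*
 * Sketch (assumption): PCIE_CLKREQ_ENAB corresponds to the Link Control
 * "Enable Clock Power Management" bit, so callers toggle and verify it as
 *
 *	uint8 on  = dhdpcie_clkreq(osh, 1, 1);	// request CLKREQ# enable
 *	uint8 cur = dhdpcie_clkreq(osh, 0, 0);	// query only, no write
 *
 * Because the function re-reads the register after writing, the return value
 * reflects what the hardware actually accepted, not the requested setting.
 */
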
8281 void dhd_dump_intr_counters(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
8282 {
8283 	dhd_bus_t *bus;
8284 	uint64 current_time = OSL_LOCALTIME_NS();
8285 
8286 	if (!dhd) {
8287 		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
8288 		return;
8289 	}
8290 
8291 	bus = dhd->bus;
8292 	if (!bus) {
8293 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
8294 		return;
8295 	}
8296 
8297 	bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n");
8298 	bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
8299 		"isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
8300 		"dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
8301 		bus->resume_intr_enable_count, bus->dpc_intr_enable_count,
8302 		bus->isr_intr_disable_count, bus->suspend_intr_disable_count,
8303 		bus->dpc_return_busdown_count, bus->non_ours_irq_count);
8304 #ifdef BCMPCIE_OOB_HOST_WAKE
8305 	bcm_bprintf(strbuf, "oob_intr_count=%lu oob_intr_enable_count=%lu"
8306 		" oob_intr_disable_count=%lu\noob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT
8307 		" last_oob_irq_enable_time="SEC_USEC_FMT"\nlast_oob_irq_disable_time="SEC_USEC_FMT
8308 		" oob_irq_enabled=%d oob_gpio_level=%d\n",
8309 		bus->oob_intr_count, bus->oob_intr_enable_count,
8310 		bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(bus),
8311 		GET_SEC_USEC(bus->last_oob_irq_time), GET_SEC_USEC(bus->last_oob_irq_enable_time),
8312 		GET_SEC_USEC(bus->last_oob_irq_disable_time), dhdpcie_get_oob_irq_status(bus),
8313 		dhdpcie_get_oob_irq_level());
8314 #endif /* BCMPCIE_OOB_HOST_WAKE */
8315 	bcm_bprintf(strbuf, "\ncurrent_time="SEC_USEC_FMT" isr_entry_time="SEC_USEC_FMT
8316 		" isr_exit_time="SEC_USEC_FMT"\ndpc_sched_time="SEC_USEC_FMT
8317 		" last_non_ours_irq_time="SEC_USEC_FMT" dpc_entry_time="SEC_USEC_FMT"\n"
8318 		"last_process_ctrlbuf_time="SEC_USEC_FMT " last_process_flowring_time="SEC_USEC_FMT
8319 		" last_process_txcpl_time="SEC_USEC_FMT"\nlast_process_rxcpl_time="SEC_USEC_FMT
8320 		" last_process_infocpl_time="SEC_USEC_FMT" last_process_edl_time="SEC_USEC_FMT
8321 		"\ndpc_exit_time="SEC_USEC_FMT" resched_dpc_time="SEC_USEC_FMT"\n"
8322 		"last_d3_inform_time="SEC_USEC_FMT"\n",
8323 		GET_SEC_USEC(current_time), GET_SEC_USEC(bus->isr_entry_time),
8324 		GET_SEC_USEC(bus->isr_exit_time), GET_SEC_USEC(bus->dpc_sched_time),
8325 		GET_SEC_USEC(bus->last_non_ours_irq_time), GET_SEC_USEC(bus->dpc_entry_time),
8326 		GET_SEC_USEC(bus->last_process_ctrlbuf_time),
8327 		GET_SEC_USEC(bus->last_process_flowring_time),
8328 		GET_SEC_USEC(bus->last_process_txcpl_time),
8329 		GET_SEC_USEC(bus->last_process_rxcpl_time),
8330 		GET_SEC_USEC(bus->last_process_infocpl_time),
8331 		GET_SEC_USEC(bus->last_process_edl_time),
8332 		GET_SEC_USEC(bus->dpc_exit_time), GET_SEC_USEC(bus->resched_dpc_time),
8333 		GET_SEC_USEC(bus->last_d3_inform_time));
8334 
8335 	bcm_bprintf(strbuf, "\nlast_suspend_start_time="SEC_USEC_FMT" last_suspend_end_time="
8336 		SEC_USEC_FMT" last_resume_start_time="SEC_USEC_FMT" last_resume_end_time="
8337 		SEC_USEC_FMT"\n", GET_SEC_USEC(bus->last_suspend_start_time),
8338 		GET_SEC_USEC(bus->last_suspend_end_time),
8339 		GET_SEC_USEC(bus->last_resume_start_time),
8340 		GET_SEC_USEC(bus->last_resume_end_time));
8341 
8342 #if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
8343 		bcm_bprintf(strbuf, "logtrace_thread_entry_time="SEC_USEC_FMT
8344 			" logtrace_thread_sem_down_time="SEC_USEC_FMT
8345 			"\nlogtrace_thread_flush_time="SEC_USEC_FMT
8346 			" logtrace_thread_unexpected_break_time="SEC_USEC_FMT
8347 			"\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
8348 			GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
8349 			GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
8350 			GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
8351 			GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
8352 			GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time));
8353 #endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
8354 }
8355 
8356 void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
8357 {
8358 	uint32 intstatus = 0;
8359 	uint32 intmask = 0;
8360 	uint32 d2h_db0 = 0;
8361 	uint32 d2h_mb_data = 0;
8362 
8363 	intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
8364 		dhd->bus->pcie_mailbox_int, 0, 0);
8365 	intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
8366 		dhd->bus->pcie_mailbox_mask, 0, 0);
8367 	d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
8368 	dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
8369 
8370 	bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
8371 		intstatus, intmask, d2h_db0);
8372 	bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n",
8373 		d2h_mb_data, dhd->bus->def_intmask);
8374 }
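
/*
 * Sketch (an assumption about callers, using the bcm_binit()/bcm_bprintf()
 * pair from bcmutils): these dump routines append into a caller-provided
 * bcmstrbuf, so a typical invocation is
 *
 *	char buf[4096];
 *	struct bcmstrbuf b;
 *	bcm_binit(&b, buf, sizeof(buf));
 *	dhd_dump_intr_registers(dhdp, &b);
 *	dhd_dump_intr_counters(dhdp, &b);
 *	// buf now holds the concatenated, NUL-terminated dump text
 */
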
8375 /** Add bus dump output to a buffer */
8376 void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
8377 {
8378 	uint16 flowid;
8379 	int ix = 0;
8380 	flow_ring_node_t *flow_ring_node;
8381 	flow_info_t *flow_info;
8382 #ifdef TX_STATUS_LATENCY_STATS
8383 	uint8 ifindex;
8384 	if_flow_lkup_t *if_flow_lkup;
8385 	dhd_if_tx_status_latency_t if_tx_status_latency[DHD_MAX_IFS];
8386 #endif /* TX_STATUS_LATENCY_STATS */
8387 
8388 	if (dhdp->busstate != DHD_BUS_DATA)
8389 		return;
8390 
8391 #ifdef TX_STATUS_LATENCY_STATS
8392 	memset(if_tx_status_latency, 0, sizeof(if_tx_status_latency));
8393 #endif /* TX_STATUS_LATENCY_STATS */
8394 #ifdef DHD_WAKE_STATUS
8395 	bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n",
8396 		bcmpcie_get_total_wake(dhdp->bus), dhdp->bus->wake_counts.rxwake,
8397 		dhdp->bus->wake_counts.rcwake);
8398 #ifdef DHD_WAKE_RX_STATUS
8399 	bcm_bprintf(strbuf, " unicast %u multicast %u broadcast %u arp %u\n",
8400 		dhdp->bus->wake_counts.rx_ucast, dhdp->bus->wake_counts.rx_mcast,
8401 		dhdp->bus->wake_counts.rx_bcast, dhdp->bus->wake_counts.rx_arp);
8402 	bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n",
8403 		dhdp->bus->wake_counts.rx_multi_ipv4, dhdp->bus->wake_counts.rx_multi_ipv6,
8404 		dhdp->bus->wake_counts.rx_icmpv6, dhdp->bus->wake_counts.rx_multi_other);
8405 	bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n",
8406 		dhdp->bus->wake_counts.rx_icmpv6_ra, dhdp->bus->wake_counts.rx_icmpv6_na,
8407 		dhdp->bus->wake_counts.rx_icmpv6_ns);
8408 #endif /* DHD_WAKE_RX_STATUS */
8409 #ifdef DHD_WAKE_EVENT_STATUS
8410 	for (flowid = 0; flowid < WLC_E_LAST; flowid++)
8411 		if (dhdp->bus->wake_counts.rc_event[flowid] != 0)
8412 			bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(flowid),
8413 				dhdp->bus->wake_counts.rc_event[flowid]);
8414 	bcm_bprintf(strbuf, "\n");
8415 #endif /* DHD_WAKE_EVENT_STATUS */
8416 #endif /* DHD_WAKE_STATUS */
8417 
8418 	dhd_prot_print_info(dhdp, strbuf);
8419 	dhd_dump_intr_registers(dhdp, strbuf);
8420 	dhd_dump_intr_counters(dhdp, strbuf);
8421 	bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
8422 		dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr);
8423 	bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr));
8424 #ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
8425 	bcm_bprintf(strbuf, "multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
8426 		dhdp->multi_client_flow_rings, dhdp->max_multi_client_flow_rings);
8427 #endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
8428 	bcm_bprintf(strbuf,
8429 		"%4s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ",
8430 		"Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen",
8431 		" Overflows", "  RD", "  WR");
8432 
8433 #ifdef TX_STATUS_LATENCY_STATS
8434 	/* Average Tx status/completion latency in microseconds */
8435 	bcm_bprintf(strbuf, "%16s %16s ", "       NumTxPkts", "    AvgTxCmpL_Us");
8436 #endif /* TX_STATUS_LATENCY_STATS */
8437 
8438 	bcm_bprintf(strbuf, "\n");
8439 
8440 	for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
8441 		flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
8442 		if (!flow_ring_node->active)
8443 			continue;
8444 
8445 		flow_info = &flow_ring_node->flow_info;
8446 		bcm_bprintf(strbuf,
8447 			"%4d %4d %2d %4d "MACDBG" %4d %4d %6d %10u ", ix++,
8448 			flow_ring_node->flowid, flow_info->ifindex, flow_info->tid,
8449 			MAC2STRDBG(flow_info->da),
8450 			DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue),
8451 			DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)),
8452 			DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node->queue)),
8453 			DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue));
8454 		dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf,
8455 			"%4d %4d ");
8456 
8457 #ifdef TX_STATUS_LATENCY_STATS
8458 		bcm_bprintf(strbuf, "%16d %16d ",
8459 			flow_info->num_tx_pkts,
8460 			flow_info->num_tx_status ?
8461 			DIV_U64_BY_U64(flow_info->cum_tx_status_latency,
8462 			flow_info->num_tx_status) : 0);
8463 
8464 		ifindex = flow_info->ifindex;
8465 		ASSERT(ifindex < DHD_MAX_IFS);
8466 		if (ifindex < DHD_MAX_IFS) {
8467 			if_tx_status_latency[ifindex].num_tx_status += flow_info->num_tx_status;
8468 			if_tx_status_latency[ifindex].cum_tx_status_latency +=
8469 				flow_info->cum_tx_status_latency;
8470 		} else {
8471 			DHD_ERROR(("%s: Bad IF index: %d associated with flowid: %d\n",
8472 				__FUNCTION__, ifindex, flowid));
8473 		}
8474 #endif /* TX_STATUS_LATENCY_STATS */
8475 		bcm_bprintf(strbuf, "\n");
8476 	}
8477 
8478 #ifdef TX_STATUS_LATENCY_STATS
8479 	bcm_bprintf(strbuf, "\n%s  %16s  %16s\n", "If", "AvgTxCmpL_Us", "NumTxStatus");
8480 	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
8481 	for (ix = 0; ix < DHD_MAX_IFS; ix++) {
8482 		if (!if_flow_lkup[ix].status) {
8483 			continue;
8484 		}
8485 		bcm_bprintf(strbuf, "%2d  %16d  %16d\n",
8486 			ix,
8487 			if_tx_status_latency[ix].num_tx_status ?
8488 			DIV_U64_BY_U64(if_tx_status_latency[ix].cum_tx_status_latency,
8489 			if_tx_status_latency[ix].num_tx_status): 0,
8490 			if_tx_status_latency[ix].num_tx_status);
8491 	}
8492 #endif /* TX_STATUS_LATENCY_STATS */
8493 
8494 #ifdef DHD_HP2P
8495 	if (dhdp->hp2p_capable) {
8496 		bcm_bprintf(strbuf, "\n%s  %16s  %16s", "Flowid", "Tx_t0", "Tx_t1");
8497 
8498 		for (flowid = 0; flowid < MAX_HP2P_FLOWS; flowid++) {
8499 			hp2p_info_t *hp2p_info;
8500 			int bin;
8501 
8502 			hp2p_info = &dhdp->hp2p_info[flowid];
8503 			if (hp2p_info->num_timer_start == 0)
8504 				continue;
8505 
8506 			bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
8507 			bcm_bprintf(strbuf, "\n%s", "Bin");
8508 
8509 			for (bin = 0; bin < MAX_TX_HIST_BIN; bin++) {
8510 				bcm_bprintf(strbuf, "\n%2d %20d  %16d", bin,
8511 					hp2p_info->tx_t0[bin], hp2p_info->tx_t1[bin]);
8512 			}
8513 
8514 			bcm_bprintf(strbuf, "\n%s  %16s", "Flowid", "Rx_t0");
8515 			bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
8516 			bcm_bprintf(strbuf, "\n%s", "Bin");
8517 
8518 			for (bin = 0; bin < MAX_RX_HIST_BIN; bin++) {
8519 				bcm_bprintf(strbuf, "\n%d %20d", bin,
8520 					hp2p_info->rx_t0[bin]);
8521 			}
8522 
8523 			bcm_bprintf(strbuf, "\n%s  %16s  %16s",
8524 				"Packet limit", "Timer limit", "Timer start");
8525 			bcm_bprintf(strbuf, "\n%d %24d %16d", hp2p_info->num_pkt_limit,
8526 				hp2p_info->num_timer_limit, hp2p_info->num_timer_start);
8527 		}
8528 
8529 		bcm_bprintf(strbuf, "\n");
8530 	}
8531 #endif /* DHD_HP2P */
8532 
8533 	bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt);
8534 	bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt);
8535 	bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt);
8536 	if (dhdp->d2h_hostrdy_supported) {
8537 		bcm_bprintf(strbuf, "hostready count:%d\n", dhdp->bus->hostready_count);
8538 	}
8539 	bcm_bprintf(strbuf, "d2h_intr_method -> %s\n",
8540 		dhdp->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX");
8541 }
8542 
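/*
 * Worked example (illustrative): the AvgTxCmpL_Us column above is just
 * cum_tx_status_latency / num_tx_status with a zero-divisor guard, e.g. a
 * cumulative latency of 1500000 over 300 completions prints as 5000.
 * DIV_U64_BY_U64 (rather than a raw '/') is presumably used because a direct
 * 64-bit divide cannot be emitted on 32-bit kernel builds.
 */
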
8543 #ifdef DNGL_AXI_ERROR_LOGGING
8544 bool
8545 dhd_axi_sig_match(dhd_pub_t *dhdp)
8546 {
8547 	uint32 axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, dhdp->axierror_logbuf_addr);
8548 
8549 	if (dhdp->dhd_induce_error == DHD_INDUCE_DROP_AXI_SIG) {
8550 		DHD_ERROR(("%s: Induce AXI signature drop\n", __FUNCTION__));
8551 		return FALSE;
8552 	}
8553 
8554 	DHD_ERROR(("%s: axi_tcm_addr: 0x%x, tcm range: 0x%x ~ 0x%x\n",
8555 		__FUNCTION__, axi_tcm_addr, dhdp->bus->dongle_ram_base,
8556 		dhdp->bus->dongle_ram_base + dhdp->bus->ramsize));
8557 	if (axi_tcm_addr >= dhdp->bus->dongle_ram_base &&
8558 	    axi_tcm_addr < dhdp->bus->dongle_ram_base + dhdp->bus->ramsize) {
8559 		uint32 axi_signature = dhdpcie_bus_rtcm32(dhdp->bus, (axi_tcm_addr +
8560 			OFFSETOF(hnd_ext_trap_axi_error_v1_t, signature)));
8561 		if (axi_signature == HND_EXT_TRAP_AXIERROR_SIGNATURE) {
8562 			return TRUE;
8563 		} else {
8564 			DHD_ERROR(("%s: No AXI signature: 0x%x\n",
8565 				__FUNCTION__, axi_signature));
8566 			return FALSE;
8567 		}
8568 	} else {
8569 		DHD_ERROR(("%s: No AXI shared tcm address debug info.\n", __FUNCTION__));
8570 		return FALSE;
8571 	}
8572 }
8573 
8574 void
8575 dhd_axi_error(dhd_pub_t *dhdp)
8576 {
8577 	dhd_axi_error_dump_t *axi_err_dump;
8578 	uint8 *axi_err_buf = NULL;
8579 	uint8 *p_axi_err = NULL;
8580 	uint32 axi_logbuf_addr;
8581 	uint32 axi_tcm_addr;
8582 	int err, size;
8583 
8584 	OSL_DELAY(75000);
8585 
8586 	axi_logbuf_addr = dhdp->axierror_logbuf_addr;
8587 	if (!axi_logbuf_addr) {
8588 		DHD_ERROR(("%s: No AXI TCM address debug info.\n", __FUNCTION__));
8589 		goto sched_axi;
8590 	}
8591 
8592 	axi_err_dump = dhdp->axi_err_dump;
8593 	if (!axi_err_dump) {
8594 		goto sched_axi;
8595 	}
8596 
8597 	if (!dhd_axi_sig_match(dhdp)) {
8598 		goto sched_axi;
8599 	}
8600 
8601 	/* Reading AXI error data for SMMU fault */
8602 	DHD_ERROR(("%s: Read AXI data from TCM address\n", __FUNCTION__));
8603 	axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, axi_logbuf_addr);
8604 	size = sizeof(hnd_ext_trap_axi_error_v1_t);
8605 	axi_err_buf = MALLOCZ(dhdp->osh, size);
8606 	if (axi_err_buf == NULL) {
8607 		DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
8608 		goto sched_axi;
8609 	}
8610 
8611 	p_axi_err = axi_err_buf;
8612 	err = dhdpcie_bus_membytes(dhdp->bus, FALSE, axi_tcm_addr, p_axi_err, size);
8613 	if (err) {
8614 		DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
8615 			__FUNCTION__, err, size, axi_tcm_addr));
8616 		goto sched_axi;
8617 	}
8618 
8619 	/* Dump data to Dmesg */
8620 	dhd_log_dump_axi_error(axi_err_buf);
8621 	err = memcpy_s(&axi_err_dump->etd_axi_error_v1, size, axi_err_buf, size);
8622 	if (err) {
8623 		DHD_ERROR(("%s: failed to copy etd axi error info, err=%d\n",
8624 			__FUNCTION__, err));
8625 	}
8626 
8627 sched_axi:
8628 	if (axi_err_buf) {
8629 		MFREE(dhdp->osh, axi_err_buf, size);
8630 	}
8631 	dhd_schedule_axi_error_dump(dhdp, NULL);
8632 }
8633 
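/*
 * Sketch (assumption): memcpy_s() here follows the C11 Annex K shape
 * memcpy_s(dest, destsz, src, count), returning nonzero on failure, which is
 * why the copy above is checked rather than using a bare memcpy():
 *
 *	if (memcpy_s(&dst, sizeof(dst), src, sizeof(dst)) != 0) {
 *		// sizes disagreed or a pointer was NULL; dst was not written
 *	}
 */
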
8634 static void
8635 dhd_log_dump_axi_error(uint8 *axi_err)
8636 {
8637 	dma_dentry_v1_t dma_dentry;
8638 	dma_fifo_v1_t dma_fifo;
8639 	int i = 0, j = 0;
8640 
8641 	if (*(uint8 *)axi_err == HND_EXT_TRAP_AXIERROR_VERSION_1) {
8642 		hnd_ext_trap_axi_error_v1_t *axi_err_v1 = (hnd_ext_trap_axi_error_v1_t *)axi_err;
8643 		DHD_ERROR(("%s: signature : 0x%x\n", __FUNCTION__, axi_err_v1->signature));
8644 		DHD_ERROR(("%s: version : 0x%x\n", __FUNCTION__, axi_err_v1->version));
8645 		DHD_ERROR(("%s: length : 0x%x\n", __FUNCTION__, axi_err_v1->length));
8646 		DHD_ERROR(("%s: dma_fifo_valid_count : 0x%x\n",
8647 			__FUNCTION__, axi_err_v1->dma_fifo_valid_count));
8648 		DHD_ERROR(("%s: axi_errorlog_status : 0x%x\n",
8649 			__FUNCTION__, axi_err_v1->axi_errorlog_status));
8650 		DHD_ERROR(("%s: axi_errorlog_core : 0x%x\n",
8651 			__FUNCTION__, axi_err_v1->axi_errorlog_core));
8652 		DHD_ERROR(("%s: axi_errorlog_hi : 0x%x\n",
8653 			__FUNCTION__, axi_err_v1->axi_errorlog_hi));
8654 		DHD_ERROR(("%s: axi_errorlog_lo : 0x%x\n",
8655 			__FUNCTION__, axi_err_v1->axi_errorlog_lo));
8656 		DHD_ERROR(("%s: axi_errorlog_id : 0x%x\n",
8657 			__FUNCTION__, axi_err_v1->axi_errorlog_id));
8658 
8659 		for (i = 0; i < MAX_DMAFIFO_ENTRIES_V1; i++) {
8660 			dma_fifo = axi_err_v1->dma_fifo[i];
8661 			DHD_ERROR(("%s: valid:%d : 0x%x\n", __FUNCTION__, i, dma_fifo.valid));
8662 			DHD_ERROR(("%s: direction:%d : 0x%x\n",
8663 				__FUNCTION__, i, dma_fifo.direction));
8664 			DHD_ERROR(("%s: index:%d : 0x%x\n",
8665 				__FUNCTION__, i, dma_fifo.index));
8666 			DHD_ERROR(("%s: dpa:%d : 0x%x\n",
8667 				__FUNCTION__, i, dma_fifo.dpa));
8668 			DHD_ERROR(("%s: desc_lo:%d : 0x%x\n",
8669 				__FUNCTION__, i, dma_fifo.desc_lo));
8670 			DHD_ERROR(("%s: desc_hi:%d : 0x%x\n",
8671 				__FUNCTION__, i, dma_fifo.desc_hi));
8672 			DHD_ERROR(("%s: din:%d : 0x%x\n",
8673 				__FUNCTION__, i, dma_fifo.din));
8674 			DHD_ERROR(("%s: dout:%d : 0x%x\n",
8675 				__FUNCTION__, i, dma_fifo.dout));
8676 			for (j = 0; j < MAX_DMAFIFO_DESC_ENTRIES_V1; j++) {
8677 				dma_dentry = axi_err_v1->dma_fifo[i].dentry[j];
8678 				DHD_ERROR(("%s: ctrl1:%d : 0x%x\n",
8679 					__FUNCTION__, i, dma_dentry.ctrl1));
8680 				DHD_ERROR(("%s: ctrl2:%d : 0x%x\n",
8681 					__FUNCTION__, i, dma_dentry.ctrl2));
8682 				DHD_ERROR(("%s: addrlo:%d : 0x%x\n",
8683 					__FUNCTION__, i, dma_dentry.addrlo));
8684 				DHD_ERROR(("%s: addrhi:%d : 0x%x\n",
8685 					__FUNCTION__, i, dma_dentry.addrhi));
8686 			}
8687 		}
8688 	}
8689 	else {
8690 		DHD_ERROR(("%s: Invalid AXI version: 0x%x\n", __FUNCTION__, (*(uint8 *)axi_err)));
8691 	}
8692 }
8693 #endif /* DNGL_AXI_ERROR_LOGGING */
8694 
8695 /**
8696  * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
8697  * flow queue to their flow ring.
8698  */
8699 static void
8700 dhd_update_txflowrings(dhd_pub_t *dhd)
8701 {
8702 	unsigned long flags;
8703 	dll_t *item, *next;
8704 	flow_ring_node_t *flow_ring_node;
8705 	struct dhd_bus *bus = dhd->bus;
8706 
8707 	if (dhd_query_bus_erros(dhd)) {
8708 		return;
8709 	}
8710 
8711 	/* Hold flowring_list_lock to ensure no race condition while accessing the List */
8712 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
8713 	for (item = dll_head_p(&bus->flowring_active_list);
8714 		(!dhd_is_device_removed(dhd) && !dll_end(&bus->flowring_active_list, item));
8715 		item = next) {
8716 		if (dhd->hang_was_sent) {
8717 			break;
8718 		}
8719 
8720 		next = dll_next_p(item);
8721 		flow_ring_node = dhd_constlist_to_flowring(item);
8722 
8723 		/* Ensure that the flow_ring_node in the list is not NULL */
8724 		ASSERT(flow_ring_node != NULL);
8725 
8726 		/* Ensure that the flowring node has valid contents */
8727 		ASSERT(flow_ring_node->prot_info != NULL);
8728 
8729 		dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
8730 	}
8731 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
8732 }
8733 
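/*
 * Sketch (illustrative): the loop above is the usual safe-iteration pattern
 * for these dll_t lists; the successor is captured before the body runs, so
 * the current node could be unlinked without breaking the traversal:
 *
 *	for (item = dll_head_p(&list); !dll_end(&list, item); item = next) {
 *		next = dll_next_p(item);	// capture successor first
 *		// ... work on item; it may safely be removed here ...
 *	}
 */
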
8734 /** Mailbox ringbell Function */
8735 static void
8736 dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
8737 {
8738 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
8739 		(bus->sih->buscorerev == 4)) {
8740 		DHD_ERROR(("mailbox communication not supported\n"));
8741 		return;
8742 	}
8743 	if (bus->db1_for_mb)  {
8744 		/* this is a pcie core register, not the config register */
8745 		DHD_INFO(("writing a mail box interrupt to the device, through doorbell 1\n"));
8746 		if (DAR_PWRREQ(bus)) {
8747 			dhd_bus_pcie_pwr_req(bus);
8748 		}
8749 		si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus),
8750 			~0, 0x12345678);
8751 	} else {
8752 		DHD_INFO(("writing a mail box interrupt to the device, through config space\n"));
8753 		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
8754 		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
8755 	}
8756 }
8757 
8758 /* Upon receiving a mailbox interrupt,
8759  * if the H2D_FW_TRAP bit is set in the mailbox location,
8760  * the device traps
8761  */
8762 static void
8763 dhdpcie_fw_trap(dhd_bus_t *bus)
8764 {
8765 	/* Send the mailbox data and generate mailbox intr. */
8766 	dhdpcie_send_mb_data(bus, H2D_FW_TRAP);
8767 	/* For FWs that cannot interpret H2D_FW_TRAP */
8768 	(void)dhd_wl_ioctl_set_intiovar(bus->dhd, "bus:disconnect", 99, WLC_SET_VAR, TRUE, 0);
8769 }
8770 
8771 /** mailbox doorbell ring function */
8772 void
8773 dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
8774 {
8775 	/* Skip after sending D3_INFORM */
8776 	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
8777 		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
8778 			__FUNCTION__, bus->bus_low_power_state));
8779 		return;
8780 	}
8781 
8782 	/* Skip in the case of link down */
8783 	if (bus->is_linkdown) {
8784 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
8785 		return;
8786 	}
8787 
8788 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
8789 		(bus->sih->buscorerev == 4)) {
8790 		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int,
8791 			PCIE_INTB, PCIE_INTB);
8792 	} else {
8793 		/* this is a pcie core register, not the config register */
8794 		DHD_INFO(("writing a door bell to the device\n"));
8795 		if (IDMA_ACTIVE(bus->dhd)) {
8796 			if (DAR_PWRREQ(bus)) {
8797 				dhd_bus_pcie_pwr_req(bus);
8798 			}
8799 			si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
8800 				~0, value);
8801 		} else {
8802 			if (DAR_PWRREQ(bus)) {
8803 				dhd_bus_pcie_pwr_req(bus);
8804 			}
8805 			si_corereg(bus->sih, bus->sih->buscoreidx,
8806 				dhd_bus_db0_addr_get(bus), ~0, 0x12345678);
8807 		}
8808 	}
8809 }
8810 
8811 /** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */
8812 void
8813 dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake)
8814 {
8815 	/* this is a pcie core register, not the config register */
8816 	/* Skip after sending D3_INFORM */
8817 	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
8818 		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
8819 			__FUNCTION__, bus->bus_low_power_state));
8820 		return;
8821 	}
8822 
8823 	/* Skip in the case of link down */
8824 	if (bus->is_linkdown) {
8825 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
8826 		return;
8827 	}
8828 
8829 	DHD_INFO(("writing a door bell 2 to the device\n"));
8830 	if (DAR_PWRREQ(bus)) {
8831 		dhd_bus_pcie_pwr_req(bus);
8832 	}
8833 	si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
8834 		~0, value);
8835 }
8836 
8837 void
8838 dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
8839 {
8840 	/* Skip after sending D3_INFORM */
8841 	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
8842 		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
8843 			__FUNCTION__, bus->bus_low_power_state));
8844 		return;
8845 	}
8846 
8847 	/* Skip in the case of link down */
8848 	if (bus->is_linkdown) {
8849 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
8850 		return;
8851 	}
8852 
8853 	if (DAR_PWRREQ(bus)) {
8854 		dhd_bus_pcie_pwr_req(bus);
8855 	}
8856 
8857 #ifdef DHD_DB0TS
8858 	if (bus->dhd->db0ts_capable) {
8859 		uint64 ts;
8860 
8861 		ts = local_clock();
8862 		do_div(ts, 1000);
8863 
8864 		value = htol32(ts & 0xFFFFFFFF);
8865 		DHD_INFO(("%s: usec timer = 0x%x\n", __FUNCTION__, value));
8866 	}
8867 #endif /* DHD_DB0TS */
8868 	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
8869 }
8870 
8871 void
8872 dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake)
8873 {
8874 	/* Skip after sending D3_INFORM */
8875 	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
8876 		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
8877 			__FUNCTION__, bus->bus_low_power_state));
8878 		return;
8879 	}
8880 
8881 	/* Skip in the case of link down */
8882 	if (bus->is_linkdown) {
8883 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
8884 		return;
8885 	}
8886 
8887 	if (DAR_PWRREQ(bus)) {
8888 		dhd_bus_pcie_pwr_req(bus);
8889 	}
8890 	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_2_addr, value);
8891 }
8892 
8893 static void
8894 dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
8895 {
8896 	uint32 w;
8897 	/* Skip after sending D3_INFORM */
8898 	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
8899 		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
8900 			__FUNCTION__, bus->bus_low_power_state));
8901 		return;
8902 	}
8903 
8904 	/* Skip in the case of link down */
8905 	if (bus->is_linkdown) {
8906 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
8907 		return;
8908 	}
8909 
8910 	w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
8911 	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
8912 }
8913 
8914 dhd_mb_ring_t
8915 dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
8916 {
8917 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
8918 		(bus->sih->buscorerev == 4)) {
8919 		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
8920 			bus->pcie_mailbox_int);
8921 		if (bus->pcie_mb_intr_addr) {
8922 			bus->pcie_mb_intr_osh = si_osh(bus->sih);
8923 			return dhd_bus_ringbell_oldpcie;
8924 		}
8925 	} else {
8926 		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
8927 			dhd_bus_db0_addr_get(bus));
8928 		if (bus->pcie_mb_intr_addr) {
8929 			bus->pcie_mb_intr_osh = si_osh(bus->sih);
8930 			return dhdpcie_bus_ringbell_fast;
8931 		}
8932 	}
8933 	return dhd_bus_ringbell;
8934 }
8935 
8936 dhd_mb_ring_2_t
8937 dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus)
8938 {
8939 	bus->pcie_mb_intr_2_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
8940 		dhd_bus_db0_addr_2_get(bus));
8941 	if (bus->pcie_mb_intr_2_addr) {
8942 		bus->pcie_mb_intr_osh = si_osh(bus->sih);
8943 		return dhdpcie_bus_ringbell_2_fast;
8944 	}
8945 	return dhd_bus_ringbell_2;
8946 }
8947 
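/*
 * Usage sketch (an assumption about the caller): the selectors above return
 * a dhd_mb_ring_t / dhd_mb_ring_2_t so the per-chip doorbell choice is made
 * once at init time and the hot path just calls through the pointer:
 *
 *	dhd_mb_ring_t ring_fn = dhd_bus_get_mbintr_fn(bus);
 *	...
 *	ring_fn(bus, value);	// fast MMIO write or legacy path, as selected
 */
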
8948 bool BCMFASTPATH
8949 dhd_bus_dpc(struct dhd_bus *bus)
8950 {
8951 	bool resched = FALSE;	  /* Flag indicating resched wanted */
8952 	unsigned long flags;
8953 
8954 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
8955 
8956 	bus->dpc_entry_time = OSL_LOCALTIME_NS();
8957 
8958 	DHD_GENERAL_LOCK(bus->dhd, flags);
8959 	/* Check only for DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS:
8960 	 * when an ioctl is waiting for a response and rmmod runs in parallel,
8961 	 * the state becomes DHD_BUS_DOWN_IN_PROGRESS; returning from here in
8962 	 * that state would leave the IOCTL response unhandled ("IOCTL Resumed On timeout")
8963 	 */
8964 	if (bus->dhd->busstate == DHD_BUS_DOWN) {
8965 		DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
8966 		bus->intstatus = 0;
8967 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
8968 		bus->dpc_return_busdown_count++;
8969 		return FALSE;
8970 	}
8971 #ifdef DHD_PCIE_RUNTIMEPM
8972 	bus->idlecount = 0;
8973 #endif /* DHD_PCIE_RUNTIMEPM */
8974 	DHD_BUS_BUSY_SET_IN_DPC(bus->dhd);
8975 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
8976 
8977 	resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus);
8978 	if (!resched) {
8979 		bus->intstatus = 0;
8980 		bus->dpc_intr_enable_count++;
8981 		/* For Linux, MacOS etc. (other than NDIS), re-enable the host
8982 		 * interrupts that were disabled in dhdpcie_bus_isr()
8983 		 */
8984 		dhdpcie_enable_irq(bus); /* Enable back interrupt!! */
8985 		bus->dpc_exit_time = OSL_LOCALTIME_NS();
8986 	} else {
8987 		bus->resched_dpc_time = OSL_LOCALTIME_NS();
8988 	}
8989 
8990 	bus->dpc_sched = resched;
8991 
8992 	DHD_GENERAL_LOCK(bus->dhd, flags);
8993 	DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd);
8994 	dhd_os_busbusy_wake(bus->dhd);
8995 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
8996 
8997 	return resched;
8998 
8999 }
9000 
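/*
 * Sketch (illustrative) of the interrupt-gating contract dhd_bus_dpc()
 * completes: the ISR masks the device interrupt and hands off, and only a
 * DPC pass that found no further work re-enables the IRQ:
 *
 *	// ISR (conceptual):  disable irq, note dpc_sched, schedule DPC
 *	// DPC (this code):   resched = process mailbox/rings;
 *	//                    if (!resched) re-enable irq; else run DPC again
 *
 * Re-enabling only on the !resched path is what prevents an interrupt storm
 * while completions are still being drained.
 */
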
9001 int
9002 dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
9003 {
9004 	uint32 cur_h2d_mb_data = 0;
9005 
9006 	DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data));
9007 
9008 	if (bus->is_linkdown) {
9009 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
9010 		return BCME_ERROR;
9011 	}
9012 
9013 	if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !bus->use_mailbox) {
9014 		DHD_INFO(("API rev is 6 or newer, sending mb data as H2D Ctrl message to dongle, 0x%04x\n",
9015 			h2d_mb_data));
9016 		/* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. */
9017 		{
9018 			if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) {
9019 				DHD_ERROR(("failure sending the H2D Mailbox message "
9020 					"to firmware\n"));
9021 				goto fail;
9022 			}
9023 		}
9024 		goto done;
9025 	}
9026 
9027 	dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
9028 
9029 	if (cur_h2d_mb_data != 0) {
9030 		uint32 i = 0;
9031 		DHD_INFO(("GRRRRRRR: MB transaction is already pending 0x%04x\n", cur_h2d_mb_data));
9032 		while ((i++ < 100) && cur_h2d_mb_data) {
9033 			OSL_DELAY(10);
9034 			dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
9035 		}
9036 		if (i >= 100) {
9037 			DHD_ERROR(("%s : waited 1ms for the dngl "
9038 				"to ack the previous mb transaction\n", __FUNCTION__));
9039 			DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n",
9040 				__FUNCTION__, cur_h2d_mb_data));
9041 		}
9042 	}
9043 
9044 	dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0);
9045 	dhd_bus_gen_devmb_intr(bus);
9046 
9047 done:
9048 	if (h2d_mb_data == H2D_HOST_D3_INFORM) {
9049 		DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
9050 		bus->last_d3_inform_time = OSL_LOCALTIME_NS();
9051 		bus->d3_inform_cnt++;
9052 	}
9053 	if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
9054 		DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__));
9055 		bus->d0_inform_in_use_cnt++;
9056 	}
9057 	if (h2d_mb_data == H2D_HOST_D0_INFORM) {
9058 		DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__));
9059 		bus->d0_inform_cnt++;
9060 	}
9061 	return BCME_OK;
9062 fail:
9063 	return BCME_ERROR;
9064 }
9065 
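/*
 * Worked detail (from the loop above): the pending-mailbox wait polls up to
 * 100 times with OSL_DELAY(10), i.e. about 100 * 10 us = 1 ms in total,
 * which is what the "waited 1ms" message refers to. The bounded-poll shape is
 *
 *	uint32 i = 0;
 *	while ((i++ < 100) && still_pending())
 *		OSL_DELAY(10);	// 10 us per spin
 *
 * where still_pending() is a placeholder for re-reading H2D_MB_DATA.
 */
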
9066 static void
9067 dhd_bus_handle_d3_ack(dhd_bus_t *bus)
9068 {
9069 	unsigned long flags_bus;
9070 	DHD_BUS_LOCK(bus->bus_lock, flags_bus);
9071 	bus->suspend_intr_disable_count++;
9072 	/* Disable dongle Interrupts Immediately after D3 */
9073 
9074 	/* For Linux, MacOS etc. (other than NDIS), along with disabling the
9075 	 * dongle interrupt by clearing the IntMask, directly disable the
9076 	 * interrupt on the host side as well. Also clear the intstatus if it
9077 	 * is set, to avoid unnecessary interrupts after the D3 ACK.
9078 	 */
9079 	dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */
9080 	dhdpcie_bus_clear_intstatus(bus);
9081 	dhdpcie_disable_irq_nosync(bus); /* Disable host interrupt!! */
9082 
9083 	if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
9084 		/* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */
9085 		bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED;
9086 		DHD_ERROR(("%s: D3_ACK Received\n", __FUNCTION__));
9087 	}
9088 	DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
9089 	/* Check for D3 ACK induce flag, which is set by firing dhd iovar to induce D3 Ack timeout.
9090 	 * If the flag is set, the D3 wake is skipped, which results in a D3 Ack timeout.
9091 	 */
9092 	if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
9093 		bus->wait_for_d3_ack = 1;
9094 		dhd_os_d3ack_wake(bus->dhd);
9095 	} else {
9096 		DHD_ERROR(("%s: Inducing D3 ACK timeout\n", __FUNCTION__));
9097 	}
9098 }
9099 void
9100 dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data)
9101 {
9102 	if (MULTIBP_ENAB(bus->sih)) {
9103 		dhd_bus_pcie_pwr_req(bus);
9104 	}
9105 
9106 	DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
9107 
9108 	if (d2h_mb_data & D2H_DEV_FWHALT) {
9109 		DHD_ERROR(("FW trap has happened\n"));
9110 		dhdpcie_checkdied(bus, NULL, 0);
9111 #ifdef OEM_ANDROID
9112 #ifdef SUPPORT_LINKDOWN_RECOVERY
9113 #ifdef CONFIG_ARCH_MSM
9114 		bus->no_cfg_restore = 1;
9115 #endif /* CONFIG_ARCH_MSM */
9116 #endif /* SUPPORT_LINKDOWN_RECOVERY */
9117 		dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO);
9118 #endif /* OEM_ANDROID */
9119 		goto exit;
9120 	}
9121 	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ)  {
9122 		bool ds_acked = FALSE;
9123 		BCM_REFERENCE(ds_acked);
9124 		if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
9125 			DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITTING\n"));
9126 			DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
9127 			bus->dhd->busstate = DHD_BUS_DOWN;
9128 			goto exit;
9129 		}
9130 		/* what should we do */
9131 		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
9132 		{
9133 			dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
9134 			DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
9135 		}
9136 	}
9137 	if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE)  {
9138 		/* what should we do */
9139 		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
9140 	}
9141 	if (d2h_mb_data & D2HMB_DS_HOST_SLEEP_EXIT_ACK)  {
9142 		/* what should we do */
9143 		DHD_INFO(("D2H_MB_DATA: D0 ACK\n"));
9144 	}
9145 	if (d2h_mb_data & D2H_DEV_D3_ACK)  {
9146 		/* what should we do */
9147 		DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
9148 		if (!bus->wait_for_d3_ack) {
9149 #if defined(DHD_HANG_SEND_UP_TEST)
9150 			if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
9151 				DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
9152 			} else {
9153 				dhd_bus_handle_d3_ack(bus);
9154 			}
9155 #else /* DHD_HANG_SEND_UP_TEST */
9156 			dhd_bus_handle_d3_ack(bus);
9157 #endif /* DHD_HANG_SEND_UP_TEST */
9158 		}
9159 	}
9160 
9161 exit:
9162 	if (MULTIBP_ENAB(bus->sih)) {
9163 		dhd_bus_pcie_pwr_req_clear(bus);
9164 	}
9165 }
9166 
9167 static void
9168 dhdpcie_handle_mb_data(dhd_bus_t *bus)
9169 {
9170 	uint32 d2h_mb_data = 0;
9171 	uint32 zero = 0;
9172 
9173 	if (MULTIBP_ENAB(bus->sih)) {
9174 		dhd_bus_pcie_pwr_req(bus);
9175 	}
9176 
9177 	dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
9178 	if (D2H_DEV_MB_INVALIDATED(d2h_mb_data)) {
9179 		DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n",
9180 			__FUNCTION__, d2h_mb_data));
9181 		goto exit;
9182 	}
9183 
9184 	dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
9185 
9186 	DHD_INFO_HW4(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
9187 	if (d2h_mb_data & D2H_DEV_FWHALT)  {
9188 		DHD_ERROR(("FW trap has happened\n"));
9189 		dhdpcie_checkdied(bus, NULL, 0);
9190 		/* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
9191 		goto exit;
9192 	}
9193 	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ)  {
9194 		/* what should we do */
9195 		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
9196 		dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
9197 		DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
9198 	}
9199 	if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE)  {
9200 		/* what should we do */
9201 		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
9202 	}
9203 	if (d2h_mb_data & D2H_DEV_D3_ACK)  {
9204 		/* what should we do */
9205 		DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
9206 		if (!bus->wait_for_d3_ack) {
9207 #if defined(DHD_HANG_SEND_UP_TEST)
9208 			if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
9209 				DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
9210 			} else {
9211 			dhd_bus_handle_d3_ack(bus);
9212 			}
9213 #else /* DHD_HANG_SEND_UP_TEST */
9214 			dhd_bus_handle_d3_ack(bus);
9215 #endif /* DHD_HANG_SEND_UP_TEST */
9216 		}
9217 	}
9218 
9219 exit:
9220 	if (MULTIBP_ENAB(bus->sih)) {
9221 		dhd_bus_pcie_pwr_req_clear(bus);
9222 	}
9223 }
9224 
9225 static void
9226 dhdpcie_read_handle_mb_data(dhd_bus_t *bus)
9227 {
9228 	uint32 d2h_mb_data = 0;
9229 	uint32 zero = 0;
9230 
9231 	if (bus->is_linkdown) {
9232 		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
9233 		return;
9234 	}
9235 
9236 	if (MULTIBP_ENAB(bus->sih)) {
9237 		dhd_bus_pcie_pwr_req(bus);
9238 	}
9239 
9240 	dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
9241 	if (!d2h_mb_data) {
9242 		goto exit;
9243 	}
9244 
9245 	dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
9246 
9247 	dhd_bus_handle_mb_data(bus, d2h_mb_data);
9248 
9249 exit:
9250 	if (MULTIBP_ENAB(bus->sih)) {
9251 		dhd_bus_pcie_pwr_req_clear(bus);
9252 	}
9253 }
9254 
9255 static bool
9256 dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
9257 {
9258 	bool resched = FALSE;
9259 	unsigned long flags_bus;
9260 
9261 	if (MULTIBP_ENAB(bus->sih)) {
9262 		dhd_bus_pcie_pwr_req(bus);
9263 	}
9264 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
9265 		(bus->sih->buscorerev == 4)) {
9266 		/* Msg stream interrupt */
9267 		if (intstatus & I_BIT1) {
9268 			resched = dhdpci_bus_read_frames(bus);
9269 		} else if (intstatus & I_BIT0) {
9270 			/* do nothing for Now */
9271 		}
9272 	} else {
9273 		if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
9274 			bus->api.handle_mb_data(bus);
9275 
9276 		/* Do not process any rings after receiving D3_ACK */
9277 		DHD_BUS_LOCK(bus->bus_lock, flags_bus);
9278 		if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
9279 			DHD_ERROR(("%s: D3 Ack Received. "
9280 				"Skip processing rest of ring buffers.\n", __FUNCTION__));
9281 			DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
9282 			goto exit;
9283 		}
9284 		DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
9285 
9286 		/* Validate intstatus only for INTX case */
9287 		if ((bus->d2h_intr_method == PCIE_MSI) ||
9288 			((bus->d2h_intr_method == PCIE_INTX) && (intstatus & bus->d2h_mb_mask))) {
9289 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
9290 			if (pm_runtime_get(dhd_bus_to_dev(bus)) >= 0) {
9291 				resched = dhdpci_bus_read_frames(bus);
9292 				pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
9293 				pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
9294 			}
9295 #else
9296 			resched = dhdpci_bus_read_frames(bus);
9297 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
9298 		}
9299 	}
9300 
9301 exit:
9302 	if (MULTIBP_ENAB(bus->sih)) {
9303 		dhd_bus_pcie_pwr_req_clear(bus);
9304 	}
9305 	return resched;
9306 }
9307 
9308 #if defined(DHD_H2D_LOG_TIME_SYNC)
9309 static void
9310 dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus)
9311 {
9312 	unsigned long time_elapsed;
9313 
9314 	/* Poll for timeout value periodically */
9315 	if ((bus->dhd->busstate == DHD_BUS_DATA) &&
9316 		(bus->dhd->dhd_rte_time_sync_ms != 0) &&
9317 		(bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE)) {
9318 		time_elapsed = OSL_SYSUPTIME_US() - bus->dhd_rte_time_sync_count;
9319 		/* Compare times in milliseconds */
9320 		if ((time_elapsed / 1000) >= bus->dhd->dhd_rte_time_sync_ms) {
9321 			/*
9322 			 * It's fine if it has crossed the timeout value; no need to
9323 			 * adjust the elapsed time
9324 			 */
9325 			bus->dhd_rte_time_sync_count += time_elapsed;
9326 
9327 			/* Schedule deferred work. The work function will send the IOVAR. */
9328 			dhd_h2d_log_time_sync_deferred_wq_schedule(bus->dhd);
9329 		}
9330 	}
9331 }
9332 #endif /* DHD_H2D_LOG_TIME_SYNC */
9333 
9334 static bool
9335 dhdpci_bus_read_frames(dhd_bus_t *bus)
9336 {
9337 	bool more = FALSE;
9338 	unsigned long flags_bus;
9339 
9340 	/* First check if there is a FW trap */
9341 	if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) &&
9342 		(bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) {
9343 #ifdef DNGL_AXI_ERROR_LOGGING
9344 		if (bus->dhd->axi_error) {
9345 			DHD_ERROR(("AXI Error happened\n"));
9346 			return FALSE;
9347 		}
9348 #endif /* DNGL_AXI_ERROR_LOGGING */
9349 		dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT);
9350 		return FALSE;
9351 	}
9352 
9353 	/* There may be frames in both ctrl buf and data buf; check ctrl buf first */
9354 	DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
9355 
9356 	dhd_prot_process_ctrlbuf(bus->dhd);
9357 	bus->last_process_ctrlbuf_time = OSL_LOCALTIME_NS();
9358 	/* Unlock to give chance for resp to be handled */
9359 	DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
9360 
9361 	/* Do not process rest of ring buf once bus enters low power state (D3_INFORM/D3_ACK) */
9362 	DHD_BUS_LOCK(bus->bus_lock, flags_bus);
9363 	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
9364 		DHD_ERROR(("%s: Bus is in power save state (%d). "
9365 			"Skip processing rest of ring buffers.\n",
9366 			__FUNCTION__, bus->bus_low_power_state));
9367 		DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
9368 		return FALSE;
9369 	}
9370 	DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
9371 
9372 	DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
9373 	/* update the flow ring cpls */
9374 	dhd_update_txflowrings(bus->dhd);
9375 	bus->last_process_flowring_time = OSL_LOCALTIME_NS();
9376 
9377 	/* With heavy TX traffic, we could get a lot of TxStatus
9378 	 * completions, so add a bound
9379 	 */
9380 #ifdef DHD_HP2P
9381 	more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_HP2P_RING);
9382 #endif /* DHD_HP2P */
9383 	more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_REGULAR_RING);
9384 	bus->last_process_txcpl_time = OSL_LOCALTIME_NS();
9385 
9386 	/* With heavy RX traffic, this routine potentially could spend some time
9387 	 * processing RX frames without RX bound
9388 	 */
9389 #ifdef DHD_HP2P
9390 	more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_HP2P_RING);
9391 #endif /* DHD_HP2P */
9392 	more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_REGULAR_RING);
9393 	bus->last_process_rxcpl_time = OSL_LOCALTIME_NS();
9394 
9395 	/* Process info ring completion messages */
9396 #ifdef EWP_EDL
9397 	if (!bus->dhd->dongle_edl_support)
9398 #endif // endif
9399 	{
9400 		more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);
9401 		bus->last_process_infocpl_time = OSL_LOCALTIME_NS();
9402 	}
9403 #ifdef EWP_EDL
9404 	else {
9405 		more |= dhd_prot_process_msgbuf_edl(bus->dhd);
9406 		bus->last_process_edl_time = OSL_LOCALTIME_NS();
9407 	}
9408 #endif /* EWP_EDL */
9409 
9410 #ifdef IDLE_TX_FLOW_MGMT
9411 	if (bus->enable_idle_flowring_mgmt) {
9412 		/* Look for idle flow rings */
9413 		dhd_bus_check_idle_scan(bus);
9414 	}
9415 #endif /* IDLE_TX_FLOW_MGMT */
9416 
9417 	/* don't talk to the dongle if fw is about to be reloaded */
9418 	if (bus->dhd->hang_was_sent) {
9419 		more = FALSE;
9420 	}
9421 	DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
9422 
9423 #ifdef SUPPORT_LINKDOWN_RECOVERY
9424 	if (bus->read_shm_fail) {
9425 		/* Read interrupt state once again to confirm linkdown */
9426 		int intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
9427 			bus->pcie_mailbox_int, 0, 0);
9428 		if (intstatus != (uint32)-1) {
9429 			DHD_ERROR(("%s: read SHM failed but intstatus is valid\n", __FUNCTION__));
9430 #ifdef DHD_FW_COREDUMP
9431 			if (bus->dhd->memdump_enabled) {
9432 				DHD_OS_WAKE_LOCK(bus->dhd);
9433 				bus->dhd->memdump_type = DUMP_TYPE_READ_SHM_FAIL;
9434 				dhd_bus_mem_dump(bus->dhd);
9435 				DHD_OS_WAKE_UNLOCK(bus->dhd);
9436 			}
9437 #endif /* DHD_FW_COREDUMP */
9438 		} else {
9439 			DHD_ERROR(("%s: Link is Down.\n", __FUNCTION__));
9440 #ifdef CONFIG_ARCH_MSM
9441 			bus->no_cfg_restore = 1;
9442 #endif /* CONFIG_ARCH_MSM */
9443 			bus->is_linkdown = 1;
9444 		}
9445 
9446 		dhd_prot_debug_info_print(bus->dhd);
9447 		bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN_EP_DETECT;
9448 		dhd_os_send_hang_message(bus->dhd);
9449 		more = FALSE;
9450 	}
9451 #endif /* SUPPORT_LINKDOWN_RECOVERY */
9452 #if defined(DHD_H2D_LOG_TIME_SYNC)
9453 	dhdpci_bus_rte_log_time_sync_poll(bus);
9454 #endif /* DHD_H2D_LOG_TIME_SYNC */
9455 	return more;
9456 }
9457 
9458 bool
9459 dhdpcie_tcm_valid(dhd_bus_t *bus)
9460 {
9461 	uint32 addr = 0;
9462 	int rv;
9463 	uint32 shaddr = 0;
9464 	pciedev_shared_t sh;
9465 
9466 	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
9467 
9468 	/* Read last word in memory to determine address of pciedev_shared structure */
9469 	addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
9470 
9471 	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
9472 		(addr > shaddr)) {
9473 		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared is invalid\n",
9474 			__FUNCTION__, addr));
9475 		return FALSE;
9476 	}
9477 
9478 	/* Read hndrte_shared structure */
9479 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh,
9480 		sizeof(pciedev_shared_t))) < 0) {
9481 		DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
9482 		return FALSE;
9483 	}
9484 
9485 	/* Compare any field in pciedev_shared_t */
9486 	if (sh.console_addr != bus->pcie_sh->console_addr) {
9487 		DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n"));
9488 		return FALSE;
9489 	}
9490 
9491 	return TRUE;
9492 }
9493 
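/*
 * Sketch (illustrative) of the shared-area convention validated above: the
 * dongle publishes the address of its pciedev_shared_t in the last 32-bit
 * word of dongle RAM, so the host locates and sanity-checks it as
 *
 *	uint32 shaddr = bus->dongle_ram_base + bus->ramsize - 4;
 *	uint32 addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
 *	// plausible only if ram_base <= addr <= shaddr and addr is neither 0
 *	// nor the nvram checksum value left there before firmware boots
 */
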
9494 static void
9495 dhdpcie_update_bus_api_revisions(uint32 firmware_api_version, uint32 host_api_version)
9496 {
9497 	snprintf(bus_api_revision, BUS_API_REV_STR_LEN, "\nBus API revisions:(FW rev%d)(DHD rev%d)",
9498 			firmware_api_version, host_api_version);
9499 	return;
9500 }
9501 
9502 static bool
9503 dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version)
9504 {
9505 	bool retcode = FALSE;
9506 
9507 	DHD_INFO(("firmware api revision %d, host api revision %d\n",
9508 		firmware_api_version, host_api_version));
9509 
9510 	switch (firmware_api_version) {
9511 	case PCIE_SHARED_VERSION_7:
9512 	case PCIE_SHARED_VERSION_6:
9513 	case PCIE_SHARED_VERSION_5:
9514 		retcode = TRUE;
9515 		break;
9516 	default:
9517 		if (firmware_api_version <= host_api_version)
9518 			retcode = TRUE;
9519 	}
9520 	return retcode;
9521 }
9522 
9523 static int
9524 dhdpcie_readshared_console(dhd_bus_t *bus)
9525 {
9526 	uint32 addr = 0;
9527 	uint32 shaddr = 0;
9528 	int rv;
9529 	pciedev_shared_t *sh = bus->pcie_sh;
9530 	dhd_timeout_t tmo;
9531 
9532 	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
9533 	/* start a timer for 5 seconds */
9534 	dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);
9535 
9536 	while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
9537 		/* Read last word in memory to determine address of pciedev_shared structure */
9538 		addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
9539 	}
9540 
9541 	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
9542 		(addr > shaddr)) {
9543 		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
9544 			__FUNCTION__, addr));
9545 		DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed));
9546 		return BCME_ERROR;
9547 	} else {
9548 		bus->shared_addr = (ulong)addr;
9549 		DHD_ERROR(("%s: PCIe shared addr (0x%08x) read took %u usec "
9550 			"before dongle is ready\n", __FUNCTION__, addr, tmo.elapsed));
9551 	}
9552 
9553 	/* Read hndrte_shared structure */
9554 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
9555 		sizeof(pciedev_shared_t))) < 0) {
9556 		DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
9557 		return rv;
9558 	}
9559 
9560 	/* Endianness */
9561 	sh->console_addr = ltoh32(sh->console_addr);
9562 	/* load bus console address */
9563 	bus->console_addr = sh->console_addr;
9564 
9565 	return BCME_OK;
9566 } /* dhdpcie_readshared_console */
9567 
9568 static int
9569 dhdpcie_readshared(dhd_bus_t *bus)
9570 {
9571 	uint32 addr = 0;
9572 	int rv, dma_indx_wr_buf, dma_indx_rd_buf;
9573 	uint32 shaddr = 0;
9574 	pciedev_shared_t *sh = bus->pcie_sh;
9575 	dhd_timeout_t tmo;
9576 	bool idma_en = FALSE;
9577 
9578 	if (MULTIBP_ENAB(bus->sih)) {
9579 		dhd_bus_pcie_pwr_req(bus);
9580 	}
9581 
9582 	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
9583 	/* start a timer for 5 seconds */
9584 	dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);
9585 
9586 	while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
9587 		/* Read last word in memory to determine address of pciedev_shared structure */
9588 		addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
9589 	}
9590 
9591 	if (addr == (uint32)-1) {
9592 		DHD_ERROR(("%s: PCIe link might be down\n", __FUNCTION__));
9593 #ifdef SUPPORT_LINKDOWN_RECOVERY
9594 #ifdef CONFIG_ARCH_MSM
9595 		bus->no_cfg_restore = 1;
9596 #endif /* CONFIG_ARCH_MSM */
9597 #endif /* SUPPORT_LINKDOWN_RECOVERY */
9598 		bus->is_linkdown = 1;
9599 		return BCME_ERROR;
9600 	}
9601 
9602 	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
9603 		(addr > shaddr)) {
9604 		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
9605 			__FUNCTION__, addr));
9606 		DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed));
9607 #ifdef DEBUG_DNGL_INIT_FAIL
9608 		if (addr != (uint32)-1) {	/* attempt further PCIe reads only if addr did not read back as -1 */
9609 #ifdef CUSTOMER_HW4_DEBUG
9610 			bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
9611 #endif /* CUSTOMER_HW4_DEBUG */
9612 			if (bus->dhd->memdump_enabled) {
9613 				bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE;
9614 				dhdpcie_mem_dump(bus);
9615 			}
9616 		}
9617 #endif /* DEBUG_DNGL_INIT_FAIL */
9618 		return BCME_ERROR;
9619 	} else {
9620 		bus->shared_addr = (ulong)addr;
9621 		DHD_ERROR(("PCIe shared addr (0x%08x) read took %u usec "
9622 			"before dongle is ready\n", addr, tmo.elapsed));
9623 	}
9624 
9625 	/* Read hndrte_shared structure */
9626 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
9627 		sizeof(pciedev_shared_t))) < 0) {
9628 		DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
9629 		return rv;
9630 	}
9631 
9632 	/* Endianness */
9633 	sh->flags = ltoh32(sh->flags);
9634 	sh->trap_addr = ltoh32(sh->trap_addr);
9635 	sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
9636 	sh->assert_file_addr = ltoh32(sh->assert_file_addr);
9637 	sh->assert_line = ltoh32(sh->assert_line);
9638 	sh->console_addr = ltoh32(sh->console_addr);
9639 	sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
9640 	sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
9641 	sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
9642 	sh->flags2 = ltoh32(sh->flags2);
9643 
9644 	/* load bus console address */
9645 	bus->console_addr = sh->console_addr;
9646 
9647 	/* Read the dma rx offset */
9648 	bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
9649 	dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);
9650 
9651 	DHD_INFO(("DMA RX offset from shared Area %d\n", bus->dma_rxoffset));
9652 
9653 	bus->api.fw_rev = sh->flags & PCIE_SHARED_VERSION_MASK;
9654 	if (!(dhdpcie_check_firmware_compatible(bus->api.fw_rev, PCIE_SHARED_VERSION)))
9655 	{
9656 		DHD_ERROR(("%s: pcie_shared version %d in dhd "
9657 		           "is older than pciedev_shared version %d in dongle\n",
9658 		           __FUNCTION__, PCIE_SHARED_VERSION,
9659 		           bus->api.fw_rev));
9660 		return BCME_ERROR;
9661 	}
9662 	dhdpcie_update_bus_api_revisions(bus->api.fw_rev, PCIE_SHARED_VERSION);
9663 
9664 	bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ?
9665 		sizeof(uint16) : sizeof(uint32);
9666 	DHD_INFO(("%s: Dongle advertises %d size indices\n",
9667 		__FUNCTION__, bus->rw_index_sz));
9668 
9669 #ifdef IDLE_TX_FLOW_MGMT
9670 	if (sh->flags & PCIE_SHARED_IDLE_FLOW_RING) {
9671 		DHD_ERROR(("%s: FW Supports IdleFlow ring management!\n",
9672 			__FUNCTION__));
9673 		bus->enable_idle_flowring_mgmt = TRUE;
9674 	}
9675 #endif /* IDLE_TX_FLOW_MGMT */
9676 
9677 	if (IDMA_CAPABLE(bus)) {
9678 		if (bus->sih->buscorerev == 23) {
9679 		} else {
9680 			idma_en = TRUE;
9681 		}
9682 	}
9683 
9684 	/* TODO: This needs to be selected based on IPC instead of compile time */
9685 	bus->dhd->hwa_enable = TRUE;
9686 
9687 	if (idma_en) {
9688 		bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE;
9689 		bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE;
9690 	}
9691 
9692 	bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;
9693 
9694 	bus->dhd->dar_enable = (sh->flags & PCIE_SHARED_DAR) ? TRUE : FALSE;
9695 
9696 	/* Does the FW support DMA'ing r/w indices */
9697 	if (sh->flags & PCIE_SHARED_DMA_INDEX) {
9698 		if (!bus->dhd->dma_ring_upd_overwrite) {
9699 			{
9700 				if (!IFRM_ENAB(bus->dhd)) {
9701 					bus->dhd->dma_h2d_ring_upd_support = TRUE;
9702 				}
9703 				bus->dhd->dma_d2h_ring_upd_support = TRUE;
9704 			}
9705 		}
9706 
9707 		if (bus->dhd->dma_d2h_ring_upd_support)
9708 			bus->dhd->d2h_sync_mode = 0;
9709 
9710 		DHD_INFO(("%s: Host supports DMA'ing indices: H2D:%d - D2H:%d. FW supports it\n",
9711 			__FUNCTION__,
9712 			(bus->dhd->dma_h2d_ring_upd_support ? 1 : 0),
9713 			(bus->dhd->dma_d2h_ring_upd_support ? 1 : 0)));
9714 	} else if (!(sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK)) {
9715 		DHD_ERROR(("%s FW has to support either dma indices or d2h sync\n",
9716 			__FUNCTION__));
9717 		return BCME_UNSUPPORTED;
9718 	} else {
9719 		bus->dhd->dma_h2d_ring_upd_support = FALSE;
9720 		bus->dhd->dma_d2h_ring_upd_support = FALSE;
9721 	}
9722 
9723 	/* Does the firmware support fast delete ring? */
9724 	if (sh->flags2 & PCIE_SHARED2_FAST_DELETE_RING) {
9725 		DHD_INFO(("%s: Firmware supports fast delete ring\n",
9726 			__FUNCTION__));
9727 		bus->dhd->fast_delete_ring_support = TRUE;
9728 	} else {
9729 		DHD_INFO(("%s: Firmware does not support fast delete ring\n",
9730 			__FUNCTION__));
9731 		bus->dhd->fast_delete_ring_support = FALSE;
9732 	}
9733 
9734 	/* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
9735 	{
9736 		ring_info_t  ring_info;
9737 
9738 		/* boundary check */
9739 		if (sh->rings_info_ptr > shaddr) {
9740 			DHD_ERROR(("%s: rings_info_ptr is invalid 0x%x, skip reading ring info\n",
9741 				__FUNCTION__, sh->rings_info_ptr));
9742 			return BCME_ERROR;
9743 		}
9744 
9745 		if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
9746 			(uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
9747 			return rv;
9748 
9749 		bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
9750 		bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);
9751 
9752 		if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
9753 			bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
9754 			bus->max_submission_rings = ltoh16(ring_info.max_submission_queues);
9755 			bus->max_completion_rings = ltoh16(ring_info.max_completion_rings);
9756 			bus->max_cmn_rings = bus->max_submission_rings - bus->max_tx_flowrings;
9757 			bus->api.handle_mb_data = dhdpcie_read_handle_mb_data;
9758 			bus->use_mailbox = sh->flags & PCIE_SHARED_USE_MAILBOX;
9759 		}
9760 		else {
9761 			bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
9762 			bus->max_submission_rings = bus->max_tx_flowrings;
9763 			bus->max_completion_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
9764 			bus->max_cmn_rings = BCMPCIE_H2D_COMMON_MSGRINGS;
9765 			bus->api.handle_mb_data = dhdpcie_handle_mb_data;
9766 			bus->use_mailbox = TRUE;
9767 		}
9768 		if (bus->max_completion_rings == 0) {
9769 			DHD_ERROR(("dongle completion rings are invalid %d\n",
9770 				bus->max_completion_rings));
9771 			return BCME_ERROR;
9772 		}
9773 		if (bus->max_submission_rings == 0) {
9774 			DHD_ERROR(("dongle submission rings are invalid %d\n",
9775 				bus->max_submission_rings));
9776 			return BCME_ERROR;
9777 		}
9778 		if (bus->max_tx_flowrings == 0) {
9779 			DHD_ERROR(("dongle txflow rings are invalid %d\n", bus->max_tx_flowrings));
9780 			return BCME_ERROR;
9781 		}
9782 
9783 		/* If both FW and Host support DMA'ing indices, allocate memory and notify FW
9784 		 * The max_submission_rings value is read from the FW-initialized ring_info
9785 		 */
9786 		if (bus->dhd->dma_h2d_ring_upd_support || IDMA_ENAB(bus->dhd)) {
9787 			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
9788 				H2D_DMA_INDX_WR_BUF, bus->max_submission_rings);
9789 			dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
9790 				D2H_DMA_INDX_RD_BUF, bus->max_completion_rings);
9791 
9792 			if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
9793 				DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices. "
9794 						"Host will use w/r indices in TCM\n",
9795 						__FUNCTION__));
9796 				bus->dhd->dma_h2d_ring_upd_support = FALSE;
9797 				bus->dhd->idma_enable = FALSE;
9798 			}
9799 		}
9800 
9801 		if (bus->dhd->dma_d2h_ring_upd_support) {
9802 			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
9803 				D2H_DMA_INDX_WR_BUF, bus->max_completion_rings);
9804 			dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
9805 				H2D_DMA_INDX_RD_BUF, bus->max_submission_rings);
9806 
9807 			if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
9808 				DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices. "
9809 						"Host will use w/r indices in TCM\n",
9810 						__FUNCTION__));
9811 				bus->dhd->dma_d2h_ring_upd_support = FALSE;
9812 			}
9813 		}
9814 
9815 		if (IFRM_ENAB(bus->dhd)) {
9816 			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
9817 				H2D_IFRM_INDX_WR_BUF, bus->max_tx_flowrings);
9818 
9819 			if (dma_indx_wr_buf != BCME_OK) {
9820 				DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n",
9821 						__FUNCTION__));
9822 				bus->dhd->ifrm_enable = FALSE;
9823 			}
9824 		}
9825 
9826 		/* read ringmem and ringstate ptrs from shared area and store in host variables */
9827 		dhd_fillup_ring_sharedptr_info(bus, &ring_info);
9828 		if (dhd_msg_level & DHD_INFO_VAL) {
9829 			bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
9830 		}
9831 		DHD_INFO(("ring_info\n"));
9832 
9833 		DHD_ERROR(("%s: max H2D queues %d\n",
9834 			__FUNCTION__, ltoh16(ring_info.max_tx_flowrings)));
9835 
9836 	DHD_INFO(("mailbox addresses\n"));
9837 		DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n",
9838 			__FUNCTION__, bus->h2d_mb_data_ptr_addr));
9839 		DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n",
9840 			__FUNCTION__, bus->d2h_mb_data_ptr_addr));
9841 	}
9842 
9843 	DHD_INFO(("%s: d2h_sync_mode 0x%08x\n",
9844 		__FUNCTION__, bus->dhd->d2h_sync_mode));
9845 
9846 	bus->dhd->d2h_hostrdy_supported =
9847 		((sh->flags & PCIE_SHARED_HOSTRDY_SUPPORT) == PCIE_SHARED_HOSTRDY_SUPPORT);
9848 
9849 	bus->dhd->ext_trap_data_supported =
9850 		((sh->flags2 & PCIE_SHARED2_EXTENDED_TRAP_DATA) == PCIE_SHARED2_EXTENDED_TRAP_DATA);
9851 
9852 	if ((sh->flags2 & PCIE_SHARED2_TXSTATUS_METADATA) == 0)
9853 		bus->dhd->pcie_txs_metadata_enable = 0;
9854 
9855 	bus->dhd->hscb_enable =
9856 		(sh->flags2 & PCIE_SHARED2_HSCB) == PCIE_SHARED2_HSCB;
9857 
9858 #ifdef EWP_EDL
9859 	if (host_edl_support) {
9860 		bus->dhd->dongle_edl_support = (sh->flags2 & PCIE_SHARED2_EDL_RING) ? TRUE : FALSE;
9861 		DHD_ERROR(("Dongle EDL support: %u\n", bus->dhd->dongle_edl_support));
9862 	}
9863 #endif /* EWP_EDL */
9864 
9865 	bus->dhd->debug_buf_dest_support =
9866 		(sh->flags2 & PCIE_SHARED2_DEBUG_BUF_DEST) ? TRUE : FALSE;
9867 	DHD_ERROR(("FW supports debug buf dest ? %s \n",
9868 		bus->dhd->debug_buf_dest_support ? "Y" : "N"));
9869 
9870 #ifdef DHD_HP2P
9871 	if (bus->dhd->hp2p_enable) {
9872 		bus->dhd->hp2p_ts_capable =
9873 			(sh->flags2 & PCIE_SHARED2_PKT_TIMESTAMP) == PCIE_SHARED2_PKT_TIMESTAMP;
9874 		bus->dhd->hp2p_capable =
9875 			(sh->flags2 & PCIE_SHARED2_HP2P) == PCIE_SHARED2_HP2P;
9876 		bus->dhd->hp2p_capable &= bus->dhd->hp2p_ts_capable;
9877 
9878 		DHD_ERROR(("FW supports HP2P ? %s \n",
9879 			bus->dhd->hp2p_capable ? "Y" : "N"));
9880 
9881 		if (bus->dhd->hp2p_capable) {
9882 			bus->dhd->pkt_thresh = HP2P_PKT_THRESH;
9883 			bus->dhd->pkt_expiry = HP2P_PKT_EXPIRY;
9884 			bus->dhd->time_thresh = HP2P_TIME_THRESH;
9885 			for (addr = 0; addr < MAX_HP2P_FLOWS; addr++) {
9886 				hp2p_info_t *hp2p_info = &bus->dhd->hp2p_info[addr];
9887 
9888 				hp2p_info->hrtimer_init = FALSE;
9889 				hp2p_info->timer.function = &dhd_hp2p_write;
9890 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21))
9891 				tasklet_hrtimer_init(&hp2p_info->timer,
9892 					dhd_hp2p_write, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
9893 #else
9894 				hrtimer_init(&hp2p_info->timer, CLOCK_MONOTONIC,
9895 					HRTIMER_MODE_REL_SOFT);
9896 #endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21) */
9897 			}
9898 		}
9899 	}
9900 #endif /* DHD_HP2P */
9901 
9902 #ifdef DHD_DB0TS
9903 	bus->dhd->db0ts_capable =
9904 		(sh->flags & PCIE_SHARED_TIMESTAMP_DB0) == PCIE_SHARED_TIMESTAMP_DB0;
9905 #endif /* DHD_DB0TS */
9906 
9907 	if (MULTIBP_ENAB(bus->sih)) {
9908 		dhd_bus_pcie_pwr_req_clear(bus);
9909 
9910 		/*
9911 		 * WAR to fix ARM cold boot;
9912 		 * De-assert WL domain in DAR
9913 		 */
9914 		if (bus->sih->buscorerev >= 68) {
9915 			dhd_bus_pcie_pwr_req_wl_domain(bus, FALSE);
9916 		}
9917 	}
9918 	return BCME_OK;
9919 } /* dhdpcie_readshared */
9920 
9921 /** Read ring mem and ring state ptr info from shared memory area in device memory */
9922 static void
9923 dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info)
9924 {
9925 	uint16 i = 0;
9926 	uint16 j = 0;
9927 	uint32 tcm_memloc;
9928 	uint32	d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;
9929 	uint16  max_tx_flowrings = bus->max_tx_flowrings;
9930 
9931 	/* Ring mem ptr info */
9932 	/* Allocated in the order
9933 		H2D_MSGRING_CONTROL_SUBMIT              0
9934 		H2D_MSGRING_RXPOST_SUBMIT               1
9935 		D2H_MSGRING_CONTROL_COMPLETE            2
9936 		D2H_MSGRING_TX_COMPLETE                 3
9937 		D2H_MSGRING_RX_COMPLETE                 4
9938 	*/
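
	/*
	 * Concretely, the loop below assigns ring id i the slot at
	 * ringmem_ptr + i * sizeof(ring_mem_t), for i in
	 * 0..BCMPCIE_COMMON_MSGRING_MAX_ID.
	 */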
9939 
9940 	{
9941 		/* ringmemptr holds start of the mem block address space */
9942 		tcm_memloc = ltoh32(ring_info->ringmem_ptr);
9943 
9944 		/* Find out the ringmem ptr for each common ring */
9945 		for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) {
9946 			bus->ring_sh[i].ring_mem_addr = tcm_memloc;
9947 			/* Update mem block */
9948 			tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
9949 			DHD_INFO(("ring id %d ring mem addr 0x%04x \n",
9950 				i, bus->ring_sh[i].ring_mem_addr));
9951 		}
9952 	}
9953 
9954 	/* Ring state mem ptr info */
9955 	{
9956 		d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr);
9957 		d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
9958 		h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
9959 		h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);
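
		/* Each pointer read above is the base of an array in dongle
		 * memory holding one rw_index_sz-byte index per ring; the
		 * loops below hand out consecutive slots in ring-id order.
		 */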
9960 
9961 		/* Store h2d common ring write/read pointers */
9962 		for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
9963 			bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
9964 			bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
9965 
9966 			/* update mem block */
9967 			h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
9968 			h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
9969 
9970 			DHD_INFO(("h2d w/r : idx %d write %x read %x \n", i,
9971 				bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
9972 		}
9973 
9974 		/* Store d2h common ring write/read pointers */
9975 		for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
9976 			bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
9977 			bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
9978 
9979 			/* update mem block */
9980 			d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
9981 			d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
9982 
9983 			DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
9984 				bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
9985 		}
9986 
9987 		/* Store txflow ring write/read pointers */
9988 		if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
9989 			max_tx_flowrings -= BCMPCIE_H2D_COMMON_MSGRINGS;
9990 		} else {
9991 			/* Account for Debug info h2d ring located after the last tx flow ring */
9992 			max_tx_flowrings = max_tx_flowrings + 1;
9993 		}
9994 		for (j = 0; j < max_tx_flowrings; i++, j++)
9995 		{
9996 			bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
9997 			bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
9998 
9999 			/* update mem block */
10000 			h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
10001 			h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
10002 
10003 			DHD_INFO(("FLOW Rings h2d w/r : idx %d write %x read %x \n", i,
10004 				bus->ring_sh[i].ring_state_w,
10005 				bus->ring_sh[i].ring_state_r));
10006 		}
10007 		/* store wr/rd pointers for  debug info completion ring */
10008 		bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
10009 		bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
10010 		d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
10011 		d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
10012 		DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
10013 			bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
10014 	}
10015 } /* dhd_fillup_ring_sharedptr_info */
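
/*
 * Minimal sketch (illustrative only; this helper is hypothetical and not
 * part of the driver): with the layout established above, the TCM address
 * of an H2D ring's write index can also be computed directly, where idx is
 * the ring's ordinal among H2D rings (common rings first, then tx flow
 * rings), exactly the order in which the walk above hands out slots.
 */
static uint32
dhd_h2d_w_idx_addr_sketch(dhd_bus_t *bus, ring_info_t *ring_info, uint16 idx)
{
	/* base of the H2D write-index array, one rw_index_sz slot per ring */
	return ltoh32(ring_info->h2d_w_idx_ptr) + (uint32)idx * bus->rw_index_sz;
}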
10016 
10017 /**
10018  * Initialize bus module: prepare for communication with the dongle. Called after downloading
10019  * firmware into the dongle.
10020  */
10021 int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
10022 {
10023 	dhd_bus_t *bus = dhdp->bus;
10024 	int  ret = 0;
10025 
10026 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
10027 
10028 	ASSERT(bus->dhd);
10029 	if (!bus->dhd)
10030 		return 0;
10031 
10032 	if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev)) {
10033 		dhd_bus_pcie_pwr_req_clear_reload_war(bus);
10034 	}
10035 
10036 	if (MULTIBP_ENAB(bus->sih)) {
10037 		dhd_bus_pcie_pwr_req(bus);
10038 	}
10039 
10040 	/* Configure AER registers to log the TLP header */
10041 	dhd_bus_aer_config(bus);
10042 
10043 	/* Make sure we're talking to the core. */
10044 	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
10045 	ASSERT(bus->reg != NULL);
10046 
10047 	/* before opening up the bus for data transfer, check if the shared area is intact */
10048 
10049 	/* Do minimum console buffer read */
10050 	/* This helps in getting trap messages if any */
10051 	if ((ret = dhdpcie_readshared_console(bus)) >= 0) {
10052 		if ((ret = dhdpcie_bus_readconsole(bus)) < 0) {
10053 			DHD_ERROR(("%s: Console buffer read failed\n",
10054 					__FUNCTION__));
10055 		}
10056 	}
10057 
10058 	ret = dhdpcie_readshared(bus);
10059 	if (ret < 0) {
10060 		DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
10061 		goto exit;
10062 	}
10063 
10064 	/* Make sure we're talking to the core. */
10065 	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
10066 	ASSERT(bus->reg != NULL);
10067 
10068 	dhd_init_bus_lock(bus);
10069 
10070 	dhd_init_backplane_access_lock(bus);
10071 
10072 	/* Set bus state according to enable result */
10073 	dhdp->busstate = DHD_BUS_DATA;
10074 	bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
10075 	dhdp->dhd_bus_busy_state = 0;
10076 
10077 	/* D11 status via PCIe completion header */
10078 	if ((ret = dhdpcie_init_d11status(bus)) < 0) {
10079 		goto exit;
10080 	}
10081 
10082 	if (!dhd_download_fw_on_driverload)
10083 		dhd_dpc_enable(bus->dhd);
10084 	/* Enable the interrupt after device is up */
10085 	dhdpcie_bus_intr_enable(bus);
10086 
10087 	bus->intr_enabled = TRUE;
10088 
10089 	/* bcmsdh_intr_unmask(bus->sdh); */
10090 #ifdef DHD_PCIE_RUNTIMEPM
10091 	bus->idlecount = 0;
10092 	bus->idletime = (int32)MAX_IDLE_COUNT;
10093 	init_waitqueue_head(&bus->rpm_queue);
10094 	mutex_init(&bus->pm_lock);
10095 #else
10096 	bus->idletime = 0;
10097 #endif /* DHD_PCIE_RUNTIMEPM */
10098 
10099 	/* Make use_d0_inform TRUE for Rev 5 for backward compatibility */
10100 	if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
10101 		bus->use_d0_inform = TRUE;
10102 	} else {
10103 		bus->use_d0_inform = FALSE;
10104 	}
10105 
10106 exit:
10107 	if (MULTIBP_ENAB(bus->sih)) {
10108 		dhd_bus_pcie_pwr_req_clear(bus);
10109 	}
10110 	return ret;
10111 }
10112 
10113 static void
10114 dhdpcie_init_shared_addr(dhd_bus_t *bus)
10115 {
10116 	uint32 addr = 0;
10117 	uint32 val = 0;
10118 
10119 	addr = bus->dongle_ram_base + bus->ramsize - 4;
10120 #ifdef DHD_PCIE_RUNTIMEPM
10121 	dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0));
10122 #endif /* DHD_PCIE_RUNTIMEPM */
10123 	dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
10124 }
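
/*
 * Illustrative counterpart to dhdpcie_init_shared_addr() (a sketch only;
 * the function name and retry bound are made up, the real read-back lives
 * in dhdpcie_readshared()): after firmware boot the dongle publishes the
 * address of its pciedev_shared_t structure in the same last word of RAM,
 * so the host polls that word until it changes from the 0 written above.
 */
static int
dhdpcie_poll_shared_addr_sketch(dhd_bus_t *bus, uint32 *out_addr)
{
	uint32 addr = 0;
	uint32 loc = bus->dongle_ram_base + bus->ramsize - 4;
	int retries = 100;	/* arbitrary bound for this sketch */

	while (retries-- > 0) {
		dhdpcie_bus_membytes(bus, FALSE, loc, (uint8 *)&addr, sizeof(addr));
		if (addr != 0)
			break;
		OSL_DELAY(1000);	/* 1 ms between polls */
	}
	*out_addr = ltoh32(addr);
	return (addr != 0) ? BCME_OK : BCME_NOTREADY;
}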
10125 
10126 int
10127 dhdpcie_chipmatch(uint16 vendor, uint16 device)
10128 {
10129 
10130 	if (vendor == PCI_VENDOR_ID_BROADCOM || vendor == PCI_VENDOR_ID_CYPRESS) {
10131 		DHD_ERROR(("%s: Supporting vendor %x device %x\n", __FUNCTION__,
10132 			vendor, device));
10133 	} else {
10134 		DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
10135 			vendor, device));
10136 		return (-ENODEV);
10137 	}
10138 
10139 	if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) ||
10140 		(device == BCM4350_D11AC5G_ID) || (device == BCM4350_CHIP_ID) ||
10141 		(device == BCM43569_CHIP_ID)) {
10142 		return 0;
10143 	}
10144 
10145 	if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) ||
10146 		(device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID)) {
10147 		return 0;
10148 	}
10149 
10150 	if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) ||
10151 		(device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID)) {
10152 		return 0;
10153 	}
10154 
10155 	if ((device == BCM4371_D11AC_ID) || (device == BCM4371_D11AC2G_ID) ||
10156 		(device == BCM4371_D11AC5G_ID) || (device == BCM4371_CHIP_ID)) {
10157 		return 0;
10158 	}
10159 
10160 	if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) ||
10161 		(device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device)) {
10162 		return 0;
10163 	}
10164 
10165 	if ((device == BCM43452_D11AC_ID) || (device == BCM43452_D11AC2G_ID) ||
10166 		(device == BCM43452_D11AC5G_ID)) {
10167 		return 0;
10168 	}
10169 
10170 	if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) ||
10171 		(device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID)) {
10172 		return 0;
10173 	}
10174 
10175 	if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) ||
10176 		(device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID)) {
10177 		return 0;
10178 	}
10179 
10180 	if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) ||
10181 		(device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID)) {
10182 		return 0;
10183 	}
10184 
10185 	if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) ||
10186 		(device == BCM4358_D11AC5G_ID)) {
10187 		return 0;
10188 	}
10189 
10190 	if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) ||
10191 		(device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID)) {
10192 		return 0;
10193 	}
10194 
10195 	if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) ||
10196 		(device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID)) {
10197 		return 0;
10198 	}
10199 
10200 	if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) ||
10201 		(device == BCM4359_D11AC5G_ID)) {
10202 		return 0;
10203 	}
10204 
10205 	if ((device == BCM43596_D11AC_ID) || (device == BCM43596_D11AC2G_ID) ||
10206 		(device == BCM43596_D11AC5G_ID)) {
10207 		return 0;
10208 	}
10209 
10210 	if ((device == BCM43597_D11AC_ID) || (device == BCM43597_D11AC2G_ID) ||
10211 		(device == BCM43597_D11AC5G_ID)) {
10212 		return 0;
10213 	}
10214 
10215 	if ((device == BCM4364_D11AC_ID) || (device == BCM4364_D11AC2G_ID) ||
10216 		(device == BCM4364_D11AC5G_ID) || (device == BCM4364_CHIP_ID)) {
10217 		return 0;
10218 	}
10219 
10220 	if ((device == BCM4361_D11AC_ID) || (device == BCM4361_D11AC2G_ID) ||
10221 		(device == BCM4361_D11AC5G_ID) || (device == BCM4361_CHIP_ID)) {
10222 		return 0;
10223 	}
10224 	if ((device == BCM4362_D11AX_ID) || (device == BCM4362_D11AX2G_ID) ||
10225 		(device == BCM4362_D11AX5G_ID) || (device == BCM4362_CHIP_ID)) {
10226 		return 0;
10227 	}
10228 	if ((device == BCM43751_D11AX_ID) || (device == BCM43751_D11AX2G_ID) ||
10229 		(device == BCM43751_D11AX5G_ID) || (device == BCM43751_CHIP_ID)) {
10230 		return 0;
10231 	}
10232 	if ((device == BCM4347_D11AC_ID) || (device == BCM4347_D11AC2G_ID) ||
10233 		(device == BCM4347_D11AC5G_ID) || (device == BCM4347_CHIP_ID)) {
10234 		return 0;
10235 	}
10236 
10237 	if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) ||
10238 		(device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID)) {
10239 		return 0;
10240 	}
10241 
10242 	if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) ||
10243 		(device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID) ||
10244 		(device == BCM43664_CHIP_ID) || (device == BCM43666_CHIP_ID)) {
10245 		return 0;
10246 	}
10247 
10248 	if ((device == BCM4369_D11AX_ID) || (device == BCM4369_D11AX2G_ID) ||
10249 		(device == BCM4369_D11AX5G_ID) || (device == BCM4369_CHIP_ID)) {
10250 		return 0;
10251 	}
10252 
10253 	if ((device == BCM4373_D11AC_ID) || (device == BCM4373_D11AC2G_ID) ||
10254 		(device == BCM4373_D11AC5G_ID) || (device == BCM4373_CHIP_ID)) {
10255 		return 0;
10256 	}
10257 
10258 	if ((device == BCM4375_D11AX_ID) || (device == BCM4375_D11AX2G_ID) ||
10259 		(device == BCM4375_D11AX5G_ID) || (device == BCM4375_CHIP_ID)) {
10260 		return 0;
10261 	}
10262 
10263 #ifdef CHIPS_CUSTOMER_HW6
10264 	if ((device == BCM4376_D11AC_ID) || (device == BCM4376_D11AC2G_ID) ||
10265 		(device == BCM4376_D11AC5G_ID) || (device == BCM4376_CHIP_ID)) {
10266 		return 0;
10267 	}
10268 	if ((device == BCM4377_M_D11AX_ID) || (device == BCM4377_D11AX_ID) ||
10269 		(device == BCM4377_D11AX2G_ID) || (device == BCM4377_D11AX5G_ID) ||
10270 		(device == BCM4377_CHIP_ID)) {
10271 		return 0;
10272 	}
10273 	if ((device == BCM4378_D11AC_ID) || (device == BCM4378_D11AC2G_ID) ||
10274 		(device == BCM4378_D11AC5G_ID) || (device == BCM4378_CHIP_ID)) {
10275 		return 0;
10276 	}
10277 #endif /* CHIPS_CUSTOMER_HW6 */
10278 #ifdef CHIPS_CUSTOMER_HW6
10279 	if ((device == BCM4368_D11AC_ID) || (device == BCM4368_D11AC2G_ID) ||
10280 		(device == BCM4368_D11AC5G_ID) || (device == BCM4368_CHIP_ID)) {
10281 		return 0;
10282 	}
10283 	if ((device == BCM4367_D11AC_ID) || (device == BCM4367_D11AC2G_ID) ||
10284 		(device == BCM4367_D11AC5G_ID) || (device == BCM4367_CHIP_ID)) {
10285 		return 0;
10286 	}
10287 #endif /* CHIPS_CUSTOMER_HW6 */
10288 
10289 	/* CYW55560 */
10290 	if ((device == CYW55560_WLAN_ID) || (device == CYW89570_WLAN_ID)) {
10291 		return 0;
10292 	}
10293 	DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device));
10294 	return (-ENODEV);
10295 } /* dhdpcie_chipmatch */
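
/*
 * Usage sketch (illustrative only; the wrapper below is hypothetical): the
 * PCIe probe path treats a 0 return from dhdpcie_chipmatch() as a supported
 * vendor/device pair and anything else as -ENODEV.
 */
static bool
dhdpcie_is_supported_dev_sketch(uint16 vendor, uint16 device)
{
	return (dhdpcie_chipmatch(vendor, device) == 0);
}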
10296 
10297 /*
10298  * Name:  dhdpcie_sromotp_customvar
10299  * Description:
10300  * Read the OTP/SPROM shadow, then parse and store the customvar tuples.
10301  * A shadow of OTP/SPROM exists in ChipCommon Region
10302  * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
10303  * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
10304  * can also be read from ChipCommon Registers.
10305  */
10306 static int
10307 dhdpcie_sromotp_customvar(dhd_bus_t *bus, uint32 *customvar1, uint32 *customvar2)
10308 {
10309 	uint16 dump_offset = 0;
10310 	uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
10311 	/* Table for 65nm OTP Size (in bits) */
10312 	int  otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
10313 	volatile uint16 *nvm_shadow;
10314 	uint cur_coreid;
10315 	uint chipc_corerev;
10316 	chipcregs_t *chipcregs;
10317 	uint16 *otp_dump;
10318 	uint8 *cis;
10319 	uint8 tup, tlen;
10320 	int i = 0;
10321 
10322 	/* Save the current core */
10323 	cur_coreid = si_coreid(bus->sih);
10324 	/* Switch to ChipC */
10325 	chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
10326 	ASSERT(chipcregs != NULL);
10327 	chipc_corerev = si_corerev(bus->sih);
10328 	/* Check ChipcommonCore Rev */
10329 	if (chipc_corerev < 44) {
10330 		DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
10331 		return BCME_UNSUPPORTED;
10332 	}
10333 	/* Check ChipID */
10334 	if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) &&
10335 		((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
10336 		((uint16)bus->sih->chip != BCM4359_CHIP_ID) &&
10337 		((uint16)bus->sih->chip != BCM4349_CHIP_ID)) {
10338 		DHD_ERROR(("%s: supported for chips "
10339 				"4350/4345/4355/4349/4359 only\n", __FUNCTION__));
10340 		return BCME_UNSUPPORTED;
10341 	}
10342 	/* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
10343 	if (chipcregs->sromcontrol & SRC_PRESENT) {
10344 		/* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
10345 		sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
10346 				>> SRC_SIZE_SHIFT))) * 1024;
10347 		DHD_TRACE(("\nSPROM Present (Size %d bits)\n", sprom_size));
10348 	}
10349 	if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
10350 		DHD_TRACE(("\nOTP Present"));
10351 		if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
10352 				== OTPL_WRAP_TYPE_40NM) {
10353 			/* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
10354 			/* Chipcommon rev51 is a variation on rev45 and does not support
10355 			* the latest OTP configuration.
10356 			*/
10357 			if (chipc_corerev != 51 && chipc_corerev >= 49) {
10358 				otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
10359 					>> OTPL_ROW_SIZE_SHIFT) + 1) * 1024;
10360 				DHD_TRACE(("(Size %d bits)\n", otp_size));
10361 			} else {
10362 				otp_size =  (((chipcregs->capabilities & CC_CAP_OTPSIZE)
10363 						>> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
10364 				DHD_TRACE(("(Size %d bits)\n", otp_size));
10365 			}
10366 		} else {
10367 			/* This part is untested since newer chips have 40nm OTP */
10368 			/* Chipcommon rev51 is a variation on rev45 and does not support
10369 			* the latest OTP configuration.
10370 			*/
10371 				if (chipc_corerev != 51 && chipc_corerev >= 49) {
10372 					otp_size = otp_size_65nm[(chipcregs->otplayout &
10373 							OTPL_ROW_SIZE_MASK) >> OTPL_ROW_SIZE_SHIFT];
10374 					DHD_TRACE(("(Size %d bits)\n", otp_size));
10375 				} else {
10376 					otp_size = otp_size_65nm[(chipcregs->capabilities &
10377 							CC_CAP_OTPSIZE)	>> CC_CAP_OTPSIZE_SHIFT];
10378 					DHD_TRACE(("(Size %d bits)\n", otp_size));
10379 					DHD_TRACE(("%s: 65nm/130nm OTP Size not tested. \n",
10380 							__FUNCTION__));
10381 				}
10382 		}
10383 	}
10384 	/* Chipcommon rev51 is a variation on rev45 and does not support
10385 	* the latest OTP configuration.
10386 	*/
10387 	if (chipc_corerev != 51 && chipc_corerev >= 49) {
10388 		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
10389 				((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
10390 			DHD_ERROR(("%s: SPROM and OTP could not be found "
10391 					"sromcontrol = %x, otplayout = %x \n",
10392 					__FUNCTION__, chipcregs->sromcontrol,
10393 					chipcregs->otplayout));
10394 			return BCME_NOTFOUND;
10395 		}
10396 	} else {
10397 		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
10398 			((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
10399 			DHD_ERROR(("%s: SPROM and OTP could not be found "
10400 					"sromcontrol = %x, capablities = %x \n",
10401 					__FUNCTION__, chipcregs->sromcontrol,
10402 					chipcregs->capabilities));
10403 			return BCME_NOTFOUND;
10404 		}
10405 	}
10406 	/* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
10407 	if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) &&
10408 			(chipcregs->sromcontrol & SRC_OTPPRESENT)) {
10409 		DHD_TRACE(("OTP Strap selected.\n"
10410 				"\nOTP Shadow in ChipCommon:\n"));
10411 		dump_size = otp_size / 16; /* 16bit words */
10412 	} else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
10413 			(chipcregs->sromcontrol & SRC_PRESENT)) {
10414 		DHD_TRACE(("SPROM Strap selected\n"
10415 				"\nSPROM Shadow in ChipCommon:\n"));
10416 		/* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
10417 		/* dump_size in 16bit words */
10418 		dump_size = sprom_size > (8 * 1024) ? (8 * 1024) / 16 : sprom_size / 16;
10419 	} else {
10420 		DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
10421 				__FUNCTION__));
10422 		return BCME_NOTFOUND;
10423 	}
10424 	if (bus->regs == NULL) {
10425 		DHD_ERROR(("ChipCommon Regs. not initialized\n"));
10426 		return BCME_NOTREADY;
10427 	} else {
10428 		/* Chipcommon rev51 is a variation on rev45 and does not support
10429 		* the latest OTP configuration.
10430 		*/
10431 		if (chipc_corerev != 51 && chipc_corerev >= 49) {
10432 			/* Chip common can read only 8kbits,
10433 			* for ccrev >= 49 otp size is around 12 kbits so use GCI core
10434 			*/
10435 			nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
10436 		}  else {
10437 			/* Point to the SPROM/OTP shadow in ChipCommon */
10438 			nvm_shadow = chipcregs->sromotp;
10439 		}
10440 		if (nvm_shadow == NULL) {
10441 			DHD_ERROR(("%s: NVM Shadow is not initialized\n", __FUNCTION__));
10442 			return BCME_NOTFOUND;
10443 		}
10444 		otp_dump = kzalloc(dump_size*2, GFP_KERNEL);
10445 		if (otp_dump == NULL) {
10446 			DHD_ERROR(("%s: failed to allocate %d bytes for the OTP dump\n",
10447 				__FUNCTION__, dump_size * 2));
10448 			return BCME_NOMEM;
10449 		}
10450 		/*
10451 		* Read 16 bits / iteration.
10452 		* dump_size & dump_offset in 16-bit words
10453 		*/
10454 		while (dump_offset < dump_size) {
10455 			*(otp_dump + dump_offset) = *(nvm_shadow + dump_offset);
10456 			dump_offset += 0x1;
10457 		}
10458 		/* Read from cis tuple start address */
10459 		cis = (uint8 *)otp_dump + CISTPL_OFFSET;
10460 		/* parse value of customvar2 tuple */
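		/* The CIS is a TLV stream: [tuple-id][len][len data bytes]...,
		 * terminated by CISTPL_END. For vendor tuples (CISTPL_BRCM_HNBU)
		 * the first data byte is a subtype; HNBU_CUSTOM1/HNBU_CUSTOM2
		 * carry a 32-bit little-endian value in the next four bytes,
		 * which the loop below reassembles.
		 */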
10461 		do {
10462 			tup = cis[i++];
10463 			if (tup == CISTPL_NULL || tup == CISTPL_END)
10464 				tlen = 0;
10465 			else
10466 				tlen = cis[i++];
10467 			if ((i + tlen) >= dump_size*2)
10468 				break;
10469 			switch (tup) {
10470 				case CISTPL_BRCM_HNBU:
10471 				switch (cis[i]) {
10472 					case HNBU_CUSTOM1:
10473 						*customvar1 = ((cis[i + 4] << 24) +
10474 								(cis[i + 3] << 16) +
10475 								(cis[i + 2] << 8) +
10476 								cis[i + 1]);
10477 						DHD_TRACE(("%s : customvar1 [%x]\n",
10478 								__FUNCTION__, *customvar1));
10479 						break;
10480 					case HNBU_CUSTOM2:
10481 						*customvar2 = ((cis[i + 4] << 24) +
10482 								(cis[i + 3] << 16) +
10483 								(cis[i + 2] << 8) +
10484 								cis[i + 1]);
10485 						DHD_TRACE(("%s : customvar2 [%x]\n",
10486 							__FUNCTION__, *customvar2));
10487 						break;
10488 					default:
10489 						break;
10490 				}
10491 					break;
10492 				default:
10493 					break;
10494 			}
10495 			i += tlen;
10496 		} while (tup != CISTPL_END);
10497 
10498 		if (otp_dump) {
10499 			kfree(otp_dump);
10500 			otp_dump = NULL;
10501 		}
10502 	}
10503 	/* Switch back to the original core */
10504 	si_setcore(bus->sih, cur_coreid, 0);
10505 	return BCME_OK;
10506 } /* dhdpcie_sromotp_customvar */
10507 
10508 /**
10509  * Name:  dhdpcie_cc_nvmshadow
10510  *
10511  * Description:
10512  * A shadow of OTP/SPROM exists in ChipCommon Region
10513  * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
10514  * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
10515  * can also be read from ChipCommon Registers.
10516  */
10517 static int
10518 dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
10519 {
10520 	uint16 dump_offset = 0;
10521 	uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
10522 
10523 	/* Table for 65nm OTP Size (in bits) */
10524 	int  otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
10525 
10526 	volatile uint16 *nvm_shadow;
10527 
10528 	uint cur_coreid;
10529 	uint chipc_corerev;
10530 	chipcregs_t *chipcregs;
10531 
10532 	/* Save the current core */
10533 	cur_coreid = si_coreid(bus->sih);
10534 	/* Switch to ChipC */
10535 	chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
10536 	ASSERT(chipcregs != NULL);
10537 
10538 	chipc_corerev = si_corerev(bus->sih);
10539 
10540 	/* Check ChipcommonCore Rev */
10541 	if (chipc_corerev < 44) {
10542 		DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
10543 		return BCME_UNSUPPORTED;
10544 	}
10545 
10546 	/* Check ChipID */
10547 	if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) &&
10548 	        ((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
10549 	        ((uint16)bus->sih->chip != BCM4364_CHIP_ID)) {
10550 		DHD_ERROR(("%s: cc_nvmdump cmd. supported for Olympic chips "
10551 					"4350/4345/4355/4364 only\n", __FUNCTION__));
10552 		return BCME_UNSUPPORTED;
10553 	}
10554 
10555 	/* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
10556 	if (chipcregs->sromcontrol & SRC_PRESENT) {
10557 		/* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
10558 		sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
10559 					>> SRC_SIZE_SHIFT))) * 1024;
10560 		bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
10561 	}
10562 
10563 	if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
10564 		bcm_bprintf(b, "\nOTP Present");
10565 
10566 		if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
10567 			== OTPL_WRAP_TYPE_40NM) {
10568 			/* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
10569 			/* Chipcommon rev51 is a variation on rev45 and does not support
10570 			 * the latest OTP configuration.
10571 			 */
10572 			if (chipc_corerev != 51 && chipc_corerev >= 49) {
10573 				otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
10574 					>> OTPL_ROW_SIZE_SHIFT) + 1) * 1024;
10575 				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
10576 			} else {
10577 				otp_size =  (((chipcregs->capabilities & CC_CAP_OTPSIZE)
10578 				        >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
10579 				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
10580 			}
10581 		} else {
10582 			/* This part is untested since newer chips have 40nm OTP */
10583 			/* Chipcommon rev51 is a variation on rev45 and does not support
10584 			 * the latest OTP configuration.
10585 			 */
10586 			if (chipc_corerev != 51 && chipc_corerev >= 49) {
10587 				otp_size = otp_size_65nm[(chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
10588 						>> OTPL_ROW_SIZE_SHIFT];
10589 				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
10590 			} else {
10591 				otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
10592 					        >> CC_CAP_OTPSIZE_SHIFT];
10593 				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
10594 				DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
10595 					__FUNCTION__));
10596 			}
10597 		}
10598 	}
10599 
10600 	/* Chipcommon rev51 is a variation on rev45 and does not support
10601 	 * the latest OTP configuration.
10602 	 */
10603 	if (chipc_corerev != 51 && chipc_corerev >= 49) {
10604 		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
10605 			((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
10606 			DHD_ERROR(("%s: SPROM and OTP could not be found "
10607 				"sromcontrol = %x, otplayout = %x \n",
10608 				__FUNCTION__, chipcregs->sromcontrol, chipcregs->otplayout));
10609 			return BCME_NOTFOUND;
10610 		}
10611 	} else {
10612 		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
10613 			((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
10614 			DHD_ERROR(("%s: SPROM and OTP could not be found "
10615 				"sromcontrol = %x, capablities = %x \n",
10616 				__FUNCTION__, chipcregs->sromcontrol, chipcregs->capabilities));
10617 			return BCME_NOTFOUND;
10618 		}
10619 	}
10620 
10621 	/* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
10622 	if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) &&
10623 		(chipcregs->sromcontrol & SRC_OTPPRESENT)) {
10624 
10625 		bcm_bprintf(b, "OTP Strap selected.\n"
10626 		               "\nOTP Shadow in ChipCommon:\n");
10627 
10628 		dump_size = otp_size / 16; /* 16bit words */
10629 
10630 	} else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
10631 		(chipcregs->sromcontrol & SRC_PRESENT)) {
10632 
10633 		bcm_bprintf(b, "SPROM Strap selected\n"
10634 				"\nSPROM Shadow in ChipCommon:\n");
10635 
10636 		/* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
10637 		/* dump_size in 16bit words */
10638 		dump_size = sprom_size > (8 * 1024) ? (8 * 1024) / 16 : sprom_size / 16;
10639 	} else {
10640 		DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
10641 			__FUNCTION__));
10642 		return BCME_NOTFOUND;
10643 	}
10644 
10645 	if (bus->regs == NULL) {
10646 		DHD_ERROR(("ChipCommon Regs. not initialized\n"));
10647 		return BCME_NOTREADY;
10648 	} else {
10649 		bcm_bprintf(b, "\n OffSet:");
10650 
10651 		/* Chipcommon rev51 is a variation on rev45 and does not support
10652 		 * the latest OTP configuration.
10653 		 */
10654 		if (chipc_corerev != 51 && chipc_corerev >= 49) {
10655 			/* Chip common can read only 8kbits,
10656 			* for ccrev >= 49 otp size is around 12 kbits so use GCI core
10657 			*/
10658 			nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
10659 		} else {
10660 			/* Point to the SPROM/OTP shadow in ChipCommon */
10661 			nvm_shadow = chipcregs->sromotp;
10662 		}
10663 
10664 		if (nvm_shadow == NULL) {
10665 			DHD_ERROR(("%s: NVM Shadow is not initialized\n", __FUNCTION__));
10666 			return BCME_NOTFOUND;
10667 		}
10668 
10669 		/*
10670 		* Read 16 bits / iteration.
10671 		* dump_size & dump_offset in 16-bit words
10672 		*/
10673 		while (dump_offset < dump_size) {
10674 			if (dump_offset % 2 == 0)
10675 				/* Print the offset in the shadow space in Bytes */
10676 				bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);
10677 
10678 			bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
10679 			dump_offset += 0x1;
10680 		}
10681 	}
10682 
10683 	/* Switch back to the original core */
10684 	si_setcore(bus->sih, cur_coreid, 0);
10685 
10686 	return BCME_OK;
10687 } /* dhdpcie_cc_nvmshadow */
10688 
10689 /** Flow rings are dynamically created and destroyed */
10690 void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
10691 {
10692 	void *pkt;
10693 	flow_queue_t *queue;
10694 	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node;
10695 	unsigned long flags;
10696 
10697 	queue = &flow_ring_node->queue;
10698 
10699 #ifdef DHDTCPACK_SUPPRESS
10700 	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
10701 	 * when there is a newly coming packet from network stack.
10702 	 */
10703 	dhd_tcpack_info_tbl_clean(bus->dhd);
10704 #endif /* DHDTCPACK_SUPPRESS */
10705 
10706 #ifdef DHD_HP2P
10707 	if (flow_ring_node->hp2p_ring) {
10708 		bus->dhd->hp2p_ring_active = FALSE;
10709 		flow_ring_node->hp2p_ring = FALSE;
10710 	}
10711 #endif /* DHD_HP2P */
10712 
10713 	/* clean up BUS level info */
10714 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
10715 
10716 	/* Flush all pending packets in the queue, if any */
10717 	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
10718 		PKTFREE(bus->dhd->osh, pkt, TRUE);
10719 	}
10720 	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
10721 
10722 	/* Reinitialise flowring's queue */
10723 	dhd_flow_queue_reinit(bus->dhd, queue, FLOW_RING_QUEUE_THRESHOLD);
10724 	flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
10725 	flow_ring_node->active = FALSE;
10726 
10727 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
10728 
10729 	/* Hold flowring_list_lock to ensure no race condition while accessing the List */
10730 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
10731 	dll_delete(&flow_ring_node->list);
10732 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
10733 
10734 	/* Release the flowring object back into the pool */
10735 	dhd_prot_flowrings_pool_release(bus->dhd,
10736 		flow_ring_node->flowid, flow_ring_node->prot_info);
10737 
10738 	/* Free the flowid back to the flowid allocator */
10739 	dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
10740 	                flow_ring_node->flowid);
10741 }
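
/*
 * Flow ring lifecycle, as implemented by the request/response pairs below:
 * create request -> dongle response -> FLOW_RING_STATUS_OPEN;
 * delete request -> FLOW_RING_STATUS_DELETE_PENDING -> dongle response ->
 * dhd_bus_clean_flow_ring();
 * flush request -> FLOW_RING_STATUS_FLUSH_PENDING -> dongle response ->
 * back to FLOW_RING_STATUS_OPEN.
 */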
10742 
10743 /**
10744  * Allocate a Flow ring buffer,
10745  * Init Ring buffer, send Msg to device about flow ring creation
10746 */
10747 int
10748 dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg)
10749 {
10750 	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
10751 
10752 	DHD_INFO(("%s :Flow create\n", __FUNCTION__));
10753 
10754 	/* Send Msg to device about flow ring creation */
10755 	if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK)
10756 		return BCME_NOMEM;
10757 
10758 	return BCME_OK;
10759 }
10760 
10761 /** Handle response from dongle on a 'flow ring create' request */
10762 void
10763 dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
10764 {
10765 	flow_ring_node_t *flow_ring_node;
10766 	unsigned long flags;
10767 
10768 	DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
10769 
10770 	/* Boundary check of the flowid */
10771 	if (flowid >= bus->dhd->num_flow_rings) {
10772 		DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
10773 			flowid, bus->dhd->num_flow_rings));
10774 		return;
10775 	}
10776 
10777 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
10778 	if (!flow_ring_node) {
10779 		DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
10780 		return;
10781 	}
10782 
10783 	ASSERT(flow_ring_node->flowid == flowid);
10784 	if (flow_ring_node->flowid != flowid) {
10785 		DHD_ERROR(("%s: flowid %d is different from the flowid "
10786 			"of the flow_ring_node %d\n", __FUNCTION__, flowid,
10787 			flow_ring_node->flowid));
10788 		return;
10789 	}
10790 
10791 	if (status != BCME_OK) {
10792 		DHD_ERROR(("%s Flow create Response failure error status = %d \n",
10793 		     __FUNCTION__, status));
10794 		/* Call Flow clean up */
10795 		dhd_bus_clean_flow_ring(bus, flow_ring_node);
10796 		return;
10797 	}
10798 
10799 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
10800 	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
10801 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
10802 
10803 	/* Now add the flow ring node to the active list.
10804 	 * Note that the code to add the newly created node to the
10805 	 * active list used to live in dhd_flowid_lookup. However, the
10806 	 * contents of the node are only filled in by
10807 	 * dhd_prot_flow_ring_create after the node has been added.
10808 	 * If a D2H interrupt arrived after the node was added to the
10809 	 * active list but before it was populated with values, the
10810 	 * bottom half would call dhd_update_txflowrings, which walks
10811 	 * the active flow ring list, picks up the nodes and operates
10812 	 * on them. Since dhd_prot_flow_ring_create would not have
10813 	 * finished yet, the contents of flow_ring_node could still be
10814 	 * NULL, leading to crashes. Hence the flow_ring_node must be
10815 	 * added to the active list only after it is truly created,
10816 	 * which is after the create response message has been
10817 	 * received from the dongle.
10818 	 */
10819 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
10820 	dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
10821 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
10822 
10823 	dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
10824 
10825 	return;
10826 }
10827 
10828 int
10829 dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
10830 {
10831 	void * pkt;
10832 	flow_queue_t *queue;
10833 	flow_ring_node_t *flow_ring_node;
10834 	unsigned long flags;
10835 
10836 	DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
10837 
10838 	flow_ring_node = (flow_ring_node_t *)arg;
10839 
10840 #ifdef DHDTCPACK_SUPPRESS
10841 	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
10842 	 * when there is a newly coming packet from network stack.
10843 	 */
10844 	dhd_tcpack_info_tbl_clean(bus->dhd);
10845 #endif /* DHDTCPACK_SUPPRESS */
10846 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
10847 	if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) {
10848 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
10849 		DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__, flow_ring_node->flowid));
10850 		return BCME_ERROR;
10851 	}
10852 	flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
10853 
10854 	queue = &flow_ring_node->queue; /* queue associated with flow ring */
10855 
10856 	/* Flush all pending packets in the queue, if any */
10857 	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
10858 		PKTFREE(bus->dhd->osh, pkt, TRUE);
10859 	}
10860 	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
10861 
10862 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
10863 
10864 	/* Send Msg to device about flow ring deletion */
10865 	dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);
10866 
10867 	return BCME_OK;
10868 }
10869 
10870 void
10871 dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
10872 {
10873 	flow_ring_node_t *flow_ring_node;
10874 
10875 	DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
10876 
10877 	/* Boundary check of the flowid */
10878 	if (flowid >= bus->dhd->num_flow_rings) {
10879 		DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
10880 			flowid, bus->dhd->num_flow_rings));
10881 		return;
10882 	}
10883 
10884 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
10885 	if (!flow_ring_node) {
10886 		DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
10887 		return;
10888 	}
10889 
10890 	ASSERT(flow_ring_node->flowid == flowid);
10891 	if (flow_ring_node->flowid != flowid) {
10892 		DHD_ERROR(("%s: flowid %d is different from the flowid "
10893 			"of the flow_ring_node %d\n", __FUNCTION__, flowid,
10894 			flow_ring_node->flowid));
10895 		return;
10896 	}
10897 
10898 	if (status != BCME_OK) {
10899 		DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
10900 		    __FUNCTION__, status));
10901 		return;
10902 	}
10903 	/* Call Flow clean up */
10904 	dhd_bus_clean_flow_ring(bus, flow_ring_node);
10905 
10906 	return;
10907 
10908 }
10909 
10910 int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
10911 {
10912 	void *pkt;
10913 	flow_queue_t *queue;
10914 	flow_ring_node_t *flow_ring_node;
10915 	unsigned long flags;
10916 
10917 	DHD_INFO(("%s :Flow Flush\n", __FUNCTION__));
10918 
10919 	flow_ring_node = (flow_ring_node_t *)arg;
10920 
10921 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
10922 	queue = &flow_ring_node->queue; /* queue associated with flow ring */
10923 	/* Flow ring status will be set back to FLOW_RING_STATUS_OPEN
10924 	 * once flow ring flush response is received for this flowring node.
10925 	 */
10926 	flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;
10927 
10928 #ifdef DHDTCPACK_SUPPRESS
10929 	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
10930 	 * when there is a newly coming packet from network stack.
10931 	 */
10932 	dhd_tcpack_info_tbl_clean(bus->dhd);
10933 #endif /* DHDTCPACK_SUPPRESS */
10934 
10935 	/* Flush all pending packets in the queue, if any */
10936 	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
10937 		PKTFREE(bus->dhd->osh, pkt, TRUE);
10938 	}
10939 	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
10940 
10941 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
10942 
10943 	/* Send Msg to device about flow ring flush */
10944 	dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);
10945 
10946 	return BCME_OK;
10947 }
10948 
10949 void
10950 dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
10951 {
10952 	flow_ring_node_t *flow_ring_node;
10953 
10954 	if (status != BCME_OK) {
10955 		DHD_ERROR(("%s Flow flush Response failure error status = %d \n",
10956 		    __FUNCTION__, status));
10957 		return;
10958 	}
10959 
10960 	/* Boundary check of the flowid */
10961 	if (flowid >= bus->dhd->num_flow_rings) {
10962 		DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
10963 			flowid, bus->dhd->num_flow_rings));
10964 		return;
10965 	}
10966 
10967 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
10968 	if (!flow_ring_node) {
10969 		DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
10970 		return;
10971 	}
10972 
10973 	ASSERT(flow_ring_node->flowid == flowid);
10974 	if (flow_ring_node->flowid != flowid) {
10975 		DHD_ERROR(("%s: flowid %d is different from the flowid "
10976 			"of the flow_ring_node %d\n", __FUNCTION__, flowid,
10977 			flow_ring_node->flowid));
10978 		return;
10979 	}
10980 
10981 	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
10982 	return;
10983 }
10984 
10985 uint32
10986 dhd_bus_max_h2d_queues(struct dhd_bus *bus)
10987 {
10988 	return bus->max_submission_rings;
10989 }
10990 
10991 /* To be symmetric with SDIO */
10992 void
10993 dhd_bus_pktq_flush(dhd_pub_t *dhdp)
10994 {
10995 	return;
10996 }
10997 
10998 void
10999 dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val)
11000 {
11001 	dhdp->bus->is_linkdown = val;
11002 }
11003 
11004 int
11005 dhd_bus_get_linkdown(dhd_pub_t *dhdp)
11006 {
11007 	return dhdp->bus->is_linkdown;
11008 }
11009 
11010 int
11011 dhd_bus_get_cto(dhd_pub_t *dhdp)
11012 {
11013 	return dhdp->bus->cto_triggered;
11014 }
11015 
11016 #ifdef IDLE_TX_FLOW_MGMT
11017 /* resume request */
11018 int
11019 dhd_bus_flow_ring_resume_request(dhd_bus_t *bus, void *arg)
11020 {
11021 	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
11022 
11023 	DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__, flow_ring_node->flowid));
11024 
11025 	flow_ring_node->status = FLOW_RING_STATUS_RESUME_PENDING;
11026 
11027 	/* Send Msg to device about flow ring resume */
11028 	dhd_prot_flow_ring_resume(bus->dhd, flow_ring_node);
11029 
11030 	return BCME_OK;
11031 }
11032 
11033 /* add the node back to active flowring */
11034 void
11035 dhd_bus_flow_ring_resume_response(dhd_bus_t *bus, uint16 flowid, int32 status)
11036 {
11037 
11038 	flow_ring_node_t *flow_ring_node;
11039 
11040 	DHD_TRACE(("%s :flowid %d \n", __FUNCTION__, flowid));
11041 
11042 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
11043 	ASSERT(flow_ring_node->flowid == flowid);
11044 
11045 	if (status != BCME_OK) {
11046 		DHD_ERROR(("%s Error Status = %d \n",
11047 			__FUNCTION__, status));
11048 		return;
11049 	}
11050 
11051 	DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n",
11052 		__FUNCTION__, flow_ring_node->flowid,  flow_ring_node->queue.len));
11053 
11054 	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
11055 
11056 	dhd_bus_schedule_queue(bus, flowid, FALSE);
11057 	return;
11058 }
11059 
11060 /* scan the flow rings in active list for idle time out */
11061 void
11062 dhd_bus_check_idle_scan(dhd_bus_t *bus)
11063 {
11064 	uint64 time_stamp; /* in millisec */
11065 	uint64 diff;
11066 
11067 	time_stamp = OSL_SYSUPTIME();
11068 	diff = time_stamp - bus->active_list_last_process_ts;
11069 
11070 	if (diff > IDLE_FLOW_LIST_TIMEOUT) {
11071 		dhd_bus_idle_scan(bus);
11072 		bus->active_list_last_process_ts = OSL_SYSUPTIME();
11073 	}
11074 
11075 	return;
11076 }
11077 
11078 /* scan the nodes in active list till it finds a non idle node */
11079 void
11080 dhd_bus_idle_scan(dhd_bus_t *bus)
11081 {
11082 	dll_t *item, *prev;
11083 	flow_ring_node_t *flow_ring_node;
11084 	uint64 time_stamp, diff;
11085 	unsigned long flags;
11086 	uint16 ringid[MAX_SUSPEND_REQ];
11087 	uint16 count = 0;
11088 
11089 	time_stamp = OSL_SYSUPTIME();
11090 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
11091 
11092 	for (item = dll_tail_p(&bus->flowring_active_list);
11093 	         !dll_end(&bus->flowring_active_list, item); item = prev) {
11094 		prev = dll_prev_p(item);
11095 
11096 		flow_ring_node = dhd_constlist_to_flowring(item);
11097 
11098 		if (flow_ring_node->flowid == (bus->max_submission_rings - 1))
11099 			continue;
11100 
11101 		if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
11102 			/* Takes care of deleting zombie rings */
11103 			/* delete from the active list */
11104 			DHD_INFO(("deleting flow id %u from active list\n",
11105 				flow_ring_node->flowid));
11106 			__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
11107 			continue;
11108 		}
11109 
11110 		diff = time_stamp - flow_ring_node->last_active_ts;
11111 
11112 		if ((diff > IDLE_FLOW_RING_TIMEOUT) && !(flow_ring_node->queue.len))  {
11113 			DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node->flowid));
11114 			/* delete from the active list */
11115 			__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
11116 			flow_ring_node->status = FLOW_RING_STATUS_SUSPENDED;
11117 			ringid[count] = flow_ring_node->flowid;
11118 			count++;
11119 			if (count == MAX_SUSPEND_REQ) {
11120 				/* create a batch message now!! */
11121 				dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
11122 				count = 0;
11123 			}
11124 
11125 		} else {
11126 
11127 			/* No more scanning, break from here! */
11128 			break;
11129 		}
11130 	}
11131 
11132 	if (count) {
11133 		dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
11134 	}
11135 
11136 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
11137 
11138 	return;
11139 }
11140 
11141 void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
11142 {
11143 	unsigned long flags;
11144 	dll_t* list;
11145 
11146 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
11147 	/* check if the node is already at head, otherwise delete it and prepend */
11148 	list = dll_head_p(&bus->flowring_active_list);
11149 	if (&flow_ring_node->list != list) {
11150 		dll_delete(&flow_ring_node->list);
11151 		dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
11152 	}
11153 
11154 	/* update flow ring timestamp */
11155 	flow_ring_node->last_active_ts = OSL_SYSUPTIME();
11156 
11157 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
11158 
11159 	return;
11160 }
11161 
11162 void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
11163 {
11164 	unsigned long flags;
11165 
11166 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
11167 
11168 	dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
11169 	/* update flow ring timestamp */
11170 	flow_ring_node->last_active_ts = OSL_SYSUPTIME();
11171 
11172 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
11173 
11174 	return;
11175 }
11176 void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
11177 {
11178 	dll_delete(&flow_ring_node->list);
11179 }
11180 
11181 void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
11182 {
11183 	unsigned long flags;
11184 
11185 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
11186 
11187 	__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
11188 
11189 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
11190 
11191 	return;
11192 }
11193 #endif /* IDLE_TX_FLOW_MGMT */
11194 
11195 int
11196 dhdpcie_bus_clock_start(struct dhd_bus *bus)
11197 {
11198 	return dhdpcie_start_host_pcieclock(bus);
11199 }
11200 
11201 int
11202 dhdpcie_bus_clock_stop(struct dhd_bus *bus)
11203 {
11204 	return dhdpcie_stop_host_pcieclock(bus);
11205 }
11206 
11207 int
11208 dhdpcie_bus_disable_device(struct dhd_bus *bus)
11209 {
11210 	return dhdpcie_disable_device(bus);
11211 }
11212 
11213 int
11214 dhdpcie_bus_enable_device(struct dhd_bus *bus)
11215 {
11216 	return dhdpcie_enable_device(bus);
11217 }
11218 
11219 int
11220 dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
11221 {
11222 	return dhdpcie_alloc_resource(bus);
11223 }
11224 
11225 void
11226 dhdpcie_bus_free_resource(struct dhd_bus *bus)
11227 {
11228 	dhdpcie_free_resource(bus);
11229 }
11230 
11231 int
11232 dhd_bus_request_irq(struct dhd_bus *bus)
11233 {
11234 	return dhdpcie_bus_request_irq(bus);
11235 }
11236 
11237 bool
11238 dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
11239 {
11240 	return dhdpcie_dongle_attach(bus);
11241 }
11242 
11243 int
11244 dhd_bus_release_dongle(struct dhd_bus *bus)
11245 {
11246 	bool dongle_isolation;
11247 	osl_t *osh;
11248 
11249 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
11250 
11251 	if (bus) {
11252 		osh = bus->osh;
11253 		ASSERT(osh);
11254 
11255 		if (bus->dhd) {
11256 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
11257 			debugger_close();
11258 #endif /* DEBUGGER || DHD_DSCOPE */
11259 
11260 			dongle_isolation = bus->dhd->dongle_isolation;
11261 			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
11262 		}
11263 	}
11264 
11265 	return 0;
11266 }
11267 
11268 int
11269 dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable)
11270 {
11271 	uint32 val;
11272 	if (enable) {
11273 		dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4,
11274 			PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR);
11275 		val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
11276 		dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_BACKPLANE_EN);
11277 	} else {
11278 		dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0);
11279 		val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
11280 		dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_BACKPLANE_EN);
11281 	}
11282 	return 0;
11283 }
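
/*
 * CTO here refers to a PCIe completion timeout: a host access to the dongle
 * backplane that never returns a completion. dhdpcie_cto_init() below arms
 * the prevention/recovery logic (timeout threshold and clock-check count in
 * the ctoctrl register), and dhdpcie_cto_error_recovery() clears the logged
 * error and resets the backplane once a CTO has actually fired.
 */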
11284 
11285 int
11286 dhdpcie_cto_init(struct dhd_bus *bus, bool enable)
11287 {
11288 	if (bus->sih->buscorerev < 19) {
11289 		DHD_INFO(("%s: Unsupported CTO, buscorerev=%d\n",
11290 			__FUNCTION__, bus->sih->buscorerev));
11291 		return BCME_UNSUPPORTED;
11292 	}
11293 
11294 	if (bus->sih->buscorerev == 19) {
11295 		uint32 pcie_lnkst;
11296 		si_corereg(bus->sih, bus->sih->buscoreidx,
11297 			OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS);
11298 
11299 		pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx,
11300 			OFFSETOF(sbpcieregs_t, configdata), 0, 0);
11301 
11302 		if (((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) &
11303 			PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1) {
11304 			return BCME_UNSUPPORTED;
11305 		}
11306 	}
11307 
11308 	bus->cto_enable = enable;
11309 
11310 	dhdpcie_cto_cfg_init(bus, enable);
11311 
11312 	if (enable) {
11313 		if (bus->cto_threshold == 0) {
11314 			bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT;
11315 		}
11316 		si_corereg(bus->sih, bus->sih->buscoreidx,
11317 			OFFSETOF(sbpcieregs_t, ctoctrl), ~0,
11318 			((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
11319 			PCIE_CTO_TO_THRESHHOLD_MASK) |
11320 			((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
11321 			PCIE_CTO_CLKCHKCNT_MASK) |
11322 			PCIE_CTO_ENAB_MASK);
11323 	} else {
11324 		si_corereg(bus->sih, bus->sih->buscoreidx,
11325 			OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0);
11326 	}
11327 
11328 	DHD_ERROR(("%s: CTO prevention and recovery set to %d\n",
11329 		__FUNCTION__, bus->cto_enable));
11330 
11331 	return 0;
11332 }
11333 
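/*
 * CTO recovery sequence: mask the CTO interrupt, dump the DAR registers
 * (still accessible after a CTO), assert SPROM_CFG_TO_SB_RST to reset
 * the backplane, then poll the DAR error log until the CTO error bit
 * clears (bounded by CTO_TO_CLEAR_WAIT_MAX_CNT iterations of
 * CTO_TO_CLEAR_WAIT_MS each), and finally clear the interrupt status
 * and deassert the backplane reset.
 */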
11334 static int
11335 dhdpcie_cto_error_recovery(struct dhd_bus *bus)
11336 {
11337 	uint32 pci_intmask, err_status;
11338 	uint8 i = 0;
11339 	uint32 val;
11340 
11341 	pci_intmask = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_MASK, 4);
11342 	dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, pci_intmask & ~PCI_CTO_INT_MASK);
11343 
11344 	DHD_OS_WAKE_LOCK(bus->dhd);
11345 
11346 	DHD_ERROR(("--- CTO Triggered --- %d\n", bus->pwr_req_ref));
11347 
11348 	/*
11349 	 * DAR still accessible
11350 	 */
11351 	dhd_bus_dump_dar_registers(bus);
11352 
11353 	/* reset backplane */
11354 	val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
11355 	dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_CFG_TO_SB_RST);
11356 
11357 	/* clear timeout error */
11358 	while (1) {
11359 		err_status =  si_corereg(bus->sih, bus->sih->buscoreidx,
11360 			DAR_ERRLOG(bus->sih->buscorerev),
11361 			0, 0);
11362 		if (err_status & PCIE_CTO_ERR_MASK) {
11363 			si_corereg(bus->sih, bus->sih->buscoreidx,
11364 					DAR_ERRLOG(bus->sih->buscorerev),
11365 					~0, PCIE_CTO_ERR_MASK);
11366 		} else {
11367 			break;
11368 		}
11369 		OSL_DELAY(CTO_TO_CLEAR_WAIT_MS * 1000);
11370 		i++;
11371 		if (i > CTO_TO_CLEAR_WAIT_MAX_CNT) {
11372 			DHD_ERROR(("cto recovery failed\n"));
11373 
11374 			DHD_OS_WAKE_UNLOCK(bus->dhd);
11375 			return BCME_ERROR;
11376 		}
11377 	}
11378 
11379 	/* clear interrupt status */
11380 	dhdpcie_bus_cfg_write_dword(bus, PCI_INT_STATUS, 4, PCI_CTO_INT_MASK);
11381 
11382 	/* Halt ARM & remove reset */
11383 	/* TBD: an ARM halt could be added here if needed */
11384 
11385 	/* reset SPROM_CFG_TO_SB_RST */
11386 	val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
11387 
11388 	DHD_ERROR(("cto recovery reset 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
11389 		PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
11390 	dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_CFG_TO_SB_RST);
11391 
11392 	val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
11393 	DHD_ERROR(("cto recovery success 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
11394 		PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
11395 
11396 	DHD_OS_WAKE_UNLOCK(bus->dhd);
11397 
11398 	return BCME_OK;
11399 }
11400 
11401 void
11402 dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus)
11403 {
11404 	uint32 val;
11405 
11406 	val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
11407 	dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
11408 		val | (0x1 << PCIE_SSRESET_DIS_ENUM_RST_BIT));
11409 }
11410 
11411 #if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
11412 static int
11413 dhdpcie_init_d11status(struct dhd_bus *bus)
11414 {
11415 	uint32 addr;
11416 	uint32 flags2;
11417 	int ret = 0;
11418 
11419 	if (bus->pcie_sh->flags2 & PCIE_SHARED2_D2H_D11_TX_STATUS) {
11420 		flags2 = bus->pcie_sh->flags2;
11421 		addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2);
11422 		flags2 |= PCIE_SHARED2_H2D_D11_TX_STATUS;
11423 		ret = dhdpcie_bus_membytes(bus, TRUE, addr,
11424 			(uint8 *)&flags2, sizeof(flags2));
11425 		if (ret < 0) {
11426 			DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n",
11427 				__FUNCTION__));
11428 			return ret;
11429 		}
11430 		bus->pcie_sh->flags2 = flags2;
11431 		bus->dhd->d11_tx_status = TRUE;
11432 	}
11433 	return ret;
11434 }
11435 
11436 #else
11437 static int
11438 dhdpcie_init_d11status(struct dhd_bus *bus)
11439 {
11440 	return 0;
11441 }
11442 #endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
11443 
11444 #ifdef BCMPCIE_OOB_HOST_WAKE
11445 int
11446 dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
11447 {
11448 	return dhdpcie_oob_intr_register(dhdp->bus);
11449 }
11450 
11451 void
11452 dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
11453 {
11454 	dhdpcie_oob_intr_unregister(dhdp->bus);
11455 }
11456 
11457 void
11458 dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
11459 {
11460 	dhdpcie_oob_intr_set(dhdp->bus, enable);
11461 }
11462 #endif /* BCMPCIE_OOB_HOST_WAKE */
11463 
11464 bool
11465 dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus)
11466 {
11467 	return bus->dhd->d2h_hostrdy_supported;
11468 }
11469 
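/*
 * Dump a window of core registers over the backplane: 'index' selects a
 * 4KB core slice (index << 12) relative to SI_ENUM_BASE, and registers
 * in [first_addr, last_addr] are read four bytes at a time.
 *
 * A minimal usage sketch (hypothetical offsets, for illustration only):
 *
 *	dhd_pcie_dump_core_regs(dhdp, 0, 0x0, 0x3c);
 *
 * logs the first sixteen words of core slice 0.
 */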
11470 void
11471 dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr)
11472 {
11473 	dhd_bus_t *bus = pub->bus;
11474 	uint32	coreoffset = index << 12;
11475 	uint32	core_addr = SI_ENUM_BASE(bus->sih) + coreoffset;
11476 	uint32 value;
11477 
11478 	while (first_addr <= last_addr) {
11479 		core_addr = SI_ENUM_BASE(bus->sih) + coreoffset + first_addr;
11480 		if (serialized_backplane_access(bus, core_addr, 4, &value, TRUE) != BCME_OK) {
11481 			DHD_ERROR(("Invalid size/addr combination \n"));
11482 		}
11483 		DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr, value));
11484 		first_addr = first_addr + 4;
11485 	}
11486 }
11487 
11488 bool
11489 dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus)
11490 {
11491 	if (!bus->dhd)
11492 		return FALSE;
11493 	else if (bus->hwa_enab_bmap) {
11494 		return bus->dhd->hwa_enable;
11495 	} else {
11496 		return FALSE;
11497 	}
11498 }
11499 
11500 bool
11501 dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus)
11502 {
11503 	if (!bus->dhd)
11504 		return FALSE;
11505 	else if (bus->idma_enabled) {
11506 		return bus->dhd->idma_enable;
11507 	} else {
11508 		return FALSE;
11509 	}
11510 }
11511 
11512 bool
11513 dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus)
11514 {
11515 	if (!bus->dhd)
11516 		return FALSE;
11517 	else if (bus->ifrm_enabled) {
11518 		return bus->dhd->ifrm_enable;
11519 	} else {
11520 		return FALSE;
11521 	}
11522 }
11523 
11524 bool
11525 dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus)
11526 {
11527 	if (!bus->dhd) {
11528 		return FALSE;
11529 	} else if (bus->dar_enabled) {
11530 		return bus->dhd->dar_enable;
11531 	} else {
11532 		return FALSE;
11533 	}
11534 }
11535 
11536 void
11537 dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option)
11538 {
11539 	DHD_ERROR(("ENABLING DW:%d\n", dw_option));
11540 	bus->dw_option = dw_option;
11541 }
11542 
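/*
 * Format the last ARM trap record for the debug dump. Note the register
 * mapping in the output string: "sp" is tr->r13, "lp" is tr->r14 and
 * "rpc" is tr->pc, each converted from the dongle's little-endian order
 * via ltoh32().
 */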
11543 void
11544 dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
11545 {
11546 	trap_t *tr = &bus->dhd->last_trap_info;
11547 	bcm_bprintf(strbuf,
11548 		"\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
11549 		" lp 0x%x, rpc 0x%x"
11550 		"\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
11551 		"r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x, r8 0x%x, r9 0x%x, "
11552 		"r10 0x%x, r11 0x%x, r12 0x%x\n\n",
11553 		ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr),
11554 		ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc),
11555 		ltoh32(bus->pcie_sh->trap_addr),
11556 		ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3),
11557 		ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7),
11558 		ltoh32(tr->r8), ltoh32(tr->r9), ltoh32(tr->r10),
11559 		ltoh32(tr->r11), ltoh32(tr->r12));
11560 }
11561 
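/*
 * Read or write an arbitrary backplane address through the serialized
 * accessor. A minimal read sketch (hypothetical address, for
 * illustration only):
 *
 *	uint val = 0;
 *	if (dhd_bus_readwrite_bp_addr(dhdp, 0x18000000, 4, &val, TRUE) == 0)
 *		DHD_ERROR(("bp[0x18000000]=0x%x\n", val));
 */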
11562 int
11563 dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read)
11564 {
11565 	int bcmerror = 0;
11566 	struct dhd_bus *bus = dhdp->bus;
11567 
11568 	if (serialized_backplane_access(bus, addr, size, data, read) != BCME_OK) {
11569 		DHD_ERROR(("Invalid size/addr combination \n"));
11570 		bcmerror = BCME_ERROR;
11571 	}
11572 
11573 	return bcmerror;
11574 }
11575 
11576 int
11577 dhd_get_idletime(dhd_pub_t *dhd)
11578 {
11579 	return dhd->bus->idletime;
11580 }
11581 
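/*
 * Serialized backplane register accessor used by the SSSR code below:
 * a one-microsecond settle delay, then a four-byte read or write, with
 * the access (or the failing address) logged.
 */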
11582 static INLINE void
11583 dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read)
11584 {
11585 	OSL_DELAY(1);
11586 	if (serialized_backplane_access(dhd->bus, addr, sizeof(uint), val, read) != BCME_OK) {
11587 		DHD_ERROR(("sbreg: Invalid uint addr: 0x%x \n", addr));
11588 	} else {
11589 		DHD_ERROR(("sbreg: addr:0x%x val:0x%x read:%d\n", addr, *val, read));
11590 	}
11591 	return;
11592 }
11593 
11594 #ifdef DHD_SSSR_DUMP
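/*
 * SSSR (assumed: silicon save/restore) dump support. The helpers below
 * capture the D11 MAC save/restore FIFOs and the digital/VASIP
 * save/restore memory both before and after a forced save/restore
 * cycle, so the two images can be compared offline.
 */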
11595 static int
11596 dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
11597 	uint addr_reg, uint data_reg)
11598 {
11599 	uint addr;
11600 	uint val = 0;
11601 	int i;
11602 
11603 	DHD_ERROR(("%s\n", __FUNCTION__));
11604 
11605 	if (!buf) {
11606 		DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
11607 		return BCME_ERROR;
11608 	}
11609 
11610 	if (!fifo_size) {
11611 		DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
11612 		return BCME_ERROR;
11613 	}
11614 
11615 	/* Set the base address offset to 0 */
11616 	addr = addr_reg;
11617 	val = 0;
11618 	dhd_sbreg_op(dhd, addr, &val, FALSE);
11619 
11620 	addr = data_reg;
11621 	/* Read 4 bytes at once and loop for fifo_size / 4 */
11622 	for (i = 0; i < fifo_size / 4; i++) {
11623 		if (serialized_backplane_access(dhd->bus, addr,
11624 				sizeof(uint), &val, TRUE) != BCME_OK) {
11625 			DHD_ERROR(("%s: error in serialized_backplane_access\n", __FUNCTION__));
11626 			return BCME_ERROR;
11627 		}
11628 		buf[i] = val;
11629 		OSL_DELAY(1);
11630 	}
11631 	return BCME_OK;
11632 }
11633 
11634 static int
11635 dhdpcie_get_sssr_dig_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
11636 	uint addr_reg)
11637 {
11638 	uint addr;
11639 	uint val = 0;
11640 	int i;
11641 	si_t *sih = dhd->bus->sih;
11642 
11643 	DHD_ERROR(("%s\n", __FUNCTION__));
11644 
11645 	if (!buf) {
11646 		DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
11647 		return BCME_ERROR;
11648 	}
11649 
11650 	if (!fifo_size) {
11651 		DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
11652 		return BCME_ERROR;
11653 	}
11654 
11655 	if (addr_reg) {
11656 
11657 		if ((!dhd->sssr_reg_info.vasip_regs.vasip_sr_size) &&
11658 			dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) {
11659 			int err = dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg, (uint8 *)buf,
11660 				fifo_size);
11661 			if (err != BCME_OK) {
11662 				DHD_ERROR(("%s: Error reading dig dump from dongle !\n",
11663 					__FUNCTION__));
11664 			}
11665 		} else {
11666 			/* Check if vasip clk is disabled, if yes enable it */
11667 			addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
11668 			dhd_sbreg_op(dhd, addr, &val, TRUE);
11669 			if (!val) {
11670 				val = 1;
11671 				dhd_sbreg_op(dhd, addr, &val, FALSE);
11672 			}
11673 
11674 			addr = addr_reg;
11675 			/* Read 4 bytes at once and loop for fifo_size / 4 */
11676 			for (i = 0; i < fifo_size / 4; i++, addr += 4) {
11677 				if (serialized_backplane_access(dhd->bus, addr, sizeof(uint),
11678 					&val, TRUE) != BCME_OK) {
11679 					DHD_ERROR(("%s: Invalid uint addr: 0x%x \n", __FUNCTION__,
11680 						addr));
11681 					return BCME_ERROR;
11682 				}
11683 				buf[i] = val;
11684 				OSL_DELAY(1);
11685 			}
11686 		}
11687 	} else {
11688 		uint cur_coreid;
11689 		uint chipc_corerev;
11690 		chipcregs_t *chipcregs;
11691 
11692 		/* Save the current core */
11693 		cur_coreid = si_coreid(sih);
11694 
11695 		/* Switch to ChipC */
11696 		chipcregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
11697 
11698 		chipc_corerev = si_corerev(sih);
11699 
11700 		if ((chipc_corerev == 64) || (chipc_corerev == 65)) {
11701 			W_REG(si_osh(sih), &chipcregs->sr_memrw_addr, 0);
11702 
11703 			/* Read 4 bytes at once and loop for fifo_size / 4 */
11704 			for (i = 0; i < fifo_size / 4; i++) {
11705 				buf[i] = R_REG(si_osh(sih), &chipcregs->sr_memrw_data);
11706 				OSL_DELAY(1);
11707 			}
11708 		}
11709 
11710 		/* Switch back to the original core */
11711 		si_setcore(sih, cur_coreid, 0);
11712 	}
11713 
11714 	return BCME_OK;
11715 }
11716 
11717 #if defined(EWP_ETD_PRSRV_LOGS)
11718 void
11719 dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd,
11720 		uint8 *ext_trap_data, void *event_decode_data)
11721 {
11722 	hnd_ext_trap_hdr_t *hdr = NULL;
11723 	bcm_tlv_t *tlv;
11724 	eventlog_trapdata_info_t *etd_evtlog = NULL;
11725 	eventlog_trap_buf_info_t *evtlog_buf_arr = NULL;
11726 	uint arr_size = 0;
11727 	int i = 0;
11728 	int err = 0;
11729 	uint32 seqnum = 0;
11730 
11731 	if (!ext_trap_data || !event_decode_data || !dhd)
11732 		return;
11733 
11734 	if (!dhd->concise_dbg_buf)
11735 		return;
11736 
11737 	/* First word is original trap_data, skip */
11738 	ext_trap_data += sizeof(uint32);
11739 
11740 	hdr = (hnd_ext_trap_hdr_t *)ext_trap_data;
11741 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_LOG_DATA);
11742 	if (tlv) {
11743 		uint32 baseaddr = 0;
11744 		uint32 endaddr = dhd->bus->dongle_ram_base + dhd->bus->ramsize - 4;
11745 
11746 		etd_evtlog = (eventlog_trapdata_info_t *)tlv->data;
11747 		DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; "
11748 			"seq_num=%x; log_arr_addr=%x\n", __FUNCTION__,
11749 			(etd_evtlog->num_elements),
11750 			ntoh32(etd_evtlog->seq_num), (etd_evtlog->log_arr_addr)));
11751 		arr_size = (uint32)sizeof(*evtlog_buf_arr) * (etd_evtlog->num_elements);
11752 		if (!arr_size) {
11753 			DHD_ERROR(("%s: num event logs is zero! \n", __FUNCTION__));
11754 			return;
11755 		}
11756 		evtlog_buf_arr = MALLOCZ(dhd->osh, arr_size);
11757 		if (!evtlog_buf_arr) {
11758 			DHD_ERROR(("%s: out of memory!\n", __FUNCTION__));
11759 			return;
11760 		}
11761 
11762 		/* boundary check */
11763 		baseaddr = etd_evtlog->log_arr_addr;
11764 		if ((baseaddr < dhd->bus->dongle_ram_base) ||
11765 			((baseaddr + arr_size) > endaddr)) {
11766 			DHD_ERROR(("%s: Error reading invalid address\n",
11767 				__FUNCTION__));
11768 			goto err;
11769 		}
11770 
11771 		/* read the eventlog_trap_buf_info_t array from dongle memory */
11772 		err = dhdpcie_bus_membytes(dhd->bus, FALSE,
11773 				(ulong)(etd_evtlog->log_arr_addr),
11774 				(uint8 *)evtlog_buf_arr, arr_size);
11775 		if (err != BCME_OK) {
11776 			DHD_ERROR(("%s: Error reading event log array from dongle !\n",
11777 				__FUNCTION__));
11778 			goto err;
11779 		}
11780 		/* ntoh is required only for seq_num: event logs from the info
11781 		* ring are sent by the dongle in network order, and for ETD the
11782 		* dongle follows the same convention
11783 		*/
11784 		seqnum = ntoh32(etd_evtlog->seq_num);
11785 		memset(dhd->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
11786 		for (i = 0; i < (etd_evtlog->num_elements); ++i) {
11787 			/* boundary check */
11788 			baseaddr = evtlog_buf_arr[i].buf_addr;
11789 			if ((baseaddr < dhd->bus->dongle_ram_base) ||
11790 				((baseaddr + evtlog_buf_arr[i].len) > endaddr)) {
11791 				DHD_ERROR(("%s: Error reading invalid address\n",
11792 					__FUNCTION__));
11793 				goto err;
11794 			}
11795 			/* read each individual event log buf from dongle memory */
11796 			err = dhdpcie_bus_membytes(dhd->bus, FALSE,
11797 					((ulong)evtlog_buf_arr[i].buf_addr),
11798 					dhd->concise_dbg_buf, (evtlog_buf_arr[i].len));
11799 			if (err != BCME_OK) {
11800 				DHD_ERROR(("%s: Error reading event log buffer from dongle !\n",
11801 					__FUNCTION__));
11802 				goto err;
11803 			}
11804 			dhd_dbg_msgtrace_log_parser(dhd, dhd->concise_dbg_buf,
11805 				event_decode_data, (evtlog_buf_arr[i].len),
11806 				FALSE, hton32(seqnum));
11807 			++seqnum;
11808 		}
11809 err:
11810 		MFREE(dhd->osh, evtlog_buf_arr, arr_size);
11811 	} else {
11812 		DHD_ERROR(("%s: Error getting trap log data in ETD !\n", __FUNCTION__));
11813 	}
11814 }
11815 #endif /* EWP_ETD_PRSRV_LOGS */
11816 
11817 static uint32
11818 dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd, uint32 reg_val)
11819 {
11820 	uint addr;
11821 	uint val = 0;
11822 
11823 	DHD_ERROR(("%s\n", __FUNCTION__));
11824 
11825 	/* conditionally clear bits [11:8] of PowerCtrl */
11826 	/* conditionally restore bits [11:8] of PowerCtrl */
11827 	dhd_sbreg_op(dhd, addr, &val, TRUE);
11828 	if (!(val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask)) {
11829 		addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
11830 		dhd_sbreg_op(dhd, addr, &reg_val, FALSE);
11831 	}
11832 	return BCME_OK;
11833 }
11834 
11835 static uint32
11836 dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd)
11837 {
11838 	uint addr;
11839 	uint val = 0, reg_val = 0;
11840 
11841 	DHD_ERROR(("%s\n", __FUNCTION__));
11842 
11843 	/* conditionally clear bits [11:8] of PowerCtrl */
11844 	addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
11845 	dhd_sbreg_op(dhd, addr, &reg_val, TRUE);
11846 	if (reg_val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) {
11847 		addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
11848 		val = 0;
11849 		dhd_sbreg_op(dhd, addr, &val, FALSE);
11850 	}
11851 	return reg_val;
11852 }
11853 
11854 static int
11855 dhdpcie_clear_intmask_and_timer(dhd_pub_t *dhd)
11856 {
11857 	uint addr;
11858 	uint val;
11859 
11860 	DHD_ERROR(("%s\n", __FUNCTION__));
11861 
11862 	/* clear chipcommon intmask */
11863 	addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.intmask;
11864 	val = 0x0;
11865 	dhd_sbreg_op(dhd, addr, &val, FALSE);
11866 
11867 	/* clear PMUIntMask0 */
11868 	addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask0;
11869 	val = 0x0;
11870 	dhd_sbreg_op(dhd, addr, &val, FALSE);
11871 
11872 	/* clear PMUIntMask1 */
11873 	addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask1;
11874 	val = 0x0;
11875 	dhd_sbreg_op(dhd, addr, &val, FALSE);
11876 
11877 	/* clear res_req_timer */
11878 	addr = dhd->sssr_reg_info.pmu_regs.base_regs.resreqtimer;
11879 	val = 0x0;
11880 	dhd_sbreg_op(dhd, addr, &val, FALSE);
11881 
11882 	/* clear macresreqtimer */
11883 	addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer;
11884 	val = 0x0;
11885 	dhd_sbreg_op(dhd, addr, &val, FALSE);
11886 
11887 	/* clear macresreqtimer1 */
11888 	addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer1;
11889 	val = 0x0;
11890 	dhd_sbreg_op(dhd, addr, &val, FALSE);
11891 
11892 	/* clear VasipClkEn */
11893 	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
11894 		addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
11895 		val = 0x0;
11896 		dhd_sbreg_op(dhd, addr, &val, FALSE);
11897 	}
11898 
11899 	return BCME_OK;
11900 }
11901 
11902 static void
11903 dhdpcie_update_d11_status_from_trapdata(dhd_pub_t *dhd)
11904 {
11905 #define TRAP_DATA_MAIN_CORE_BIT_MASK	(1 << 1)
11906 #define TRAP_DATA_AUX_CORE_BIT_MASK	(1 << 4)
11907 	uint trap_data_mask[MAX_NUM_D11CORES] =
11908 		{TRAP_DATA_MAIN_CORE_BIT_MASK, TRAP_DATA_AUX_CORE_BIT_MASK};
11909 	int i;
11910 	/* Apply only for 4375 chip */
11911 	if (dhd_bus_chip_id(dhd) == BCM4375_CHIP_ID) {
11912 		for (i = 0; i < MAX_NUM_D11CORES; i++) {
11913 			if (dhd->sssr_d11_outofreset[i] &&
11914 				(dhd->dongle_trap_data & trap_data_mask[i])) {
11915 				dhd->sssr_d11_outofreset[i] = TRUE;
11916 			} else {
11917 				dhd->sssr_d11_outofreset[i] = FALSE;
11918 			}
11919 			DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d after AND with "
11920 				"trap_data:0x%x-0x%x\n",
11921 				__FUNCTION__, i, dhd->sssr_d11_outofreset[i],
11922 				dhd->dongle_trap_data, trap_data_mask[i]));
11923 		}
11924 	}
11925 }
11926 
11927 static int
11928 dhdpcie_d11_check_outofreset(dhd_pub_t *dhd)
11929 {
11930 	int i;
11931 	uint addr;
11932 	uint val = 0;
11933 
11934 	DHD_ERROR(("%s\n", __FUNCTION__));
11935 
11936 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
11937 		/* Check if bit 0 of resetctrl is cleared */
11938 		addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
11939 		if (!addr) {
11940 			DHD_ERROR(("%s: skipping for core[%d] as 'addr' is NULL\n",
11941 				__FUNCTION__, i));
11942 			continue;
11943 		}
11944 		dhd_sbreg_op(dhd, addr, &val, TRUE);
11945 		if (!(val & 1)) {
11946 			dhd->sssr_d11_outofreset[i] = TRUE;
11947 		} else {
11948 			dhd->sssr_d11_outofreset[i] = FALSE;
11949 		}
11950 		DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n",
11951 			__FUNCTION__, i, dhd->sssr_d11_outofreset[i]));
11952 	}
11953 	dhdpcie_update_d11_status_from_trapdata(dhd);
11954 
11955 	return BCME_OK;
11956 }
11957 
11958 static int
11959 dhdpcie_d11_clear_clk_req(dhd_pub_t *dhd)
11960 {
11961 	int i;
11962 	uint addr;
11963 	uint val = 0;
11964 
11965 	DHD_ERROR(("%s\n", __FUNCTION__));
11966 
11967 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
11968 		if (dhd->sssr_d11_outofreset[i]) {
11969 			/* clear request clk only if itopoobb is non zero */
11970 			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.itopoobb;
11971 			dhd_sbreg_op(dhd, addr, &val, TRUE);
11972 			if (val != 0) {
11973 				/* clear clockcontrolstatus */
11974 				addr = dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus;
11975 				val =
11976 				dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus_val;
11977 				dhd_sbreg_op(dhd, addr, &val, FALSE);
11978 			}
11979 		}
11980 	}
11981 	return BCME_OK;
11982 }
11983 
11984 static int
11985 dhdpcie_arm_clear_clk_req(dhd_pub_t *dhd)
11986 {
11987 	uint addr;
11988 	uint val = 0;
11989 
11990 	DHD_ERROR(("%s\n", __FUNCTION__));
11991 
11992 	/* Check if bit 0 of resetctrl is cleared */
11993 	addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;
11994 	dhd_sbreg_op(dhd, addr, &val, TRUE);
11995 	if (!(val & 1)) {
11996 		/* clear request clk only if itopoobb is non zero */
11997 		addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.itopoobb;
11998 		dhd_sbreg_op(dhd, addr, &val, TRUE);
11999 		if (val != 0) {
12000 			/* clear clockcontrolstatus */
12001 			addr = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus;
12002 			val = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus_val;
12003 			dhd_sbreg_op(dhd, addr, &val, FALSE);
12004 		}
12005 	}
12006 	return BCME_OK;
12007 }
12008 
12009 static int
12010 dhdpcie_pcie_clear_clk_req(dhd_pub_t *dhd)
12011 {
12012 	uint addr;
12013 	uint val = 0;
12014 
12015 	DHD_ERROR(("%s\n", __FUNCTION__));
12016 
12017 	/* clear request clk only if itopoobb is non zero */
12018 	addr = dhd->sssr_reg_info.pcie_regs.wrapper_regs.itopoobb;
12019 	dhd_sbreg_op(dhd, addr, &val, TRUE);
12020 	if (val) {
12021 		/* clear clockcontrolstatus */
12022 		addr = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus;
12023 		val = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus_val;
12024 		dhd_sbreg_op(dhd, addr, &val, FALSE);
12025 	}
12026 	return BCME_OK;
12027 }
12028 
12029 static int
12030 dhdpcie_pcie_send_ltrsleep(dhd_pub_t *dhd)
12031 {
12032 	uint addr;
12033 	uint val = 0;
12034 
12035 	DHD_ERROR(("%s\n", __FUNCTION__));
12036 
12037 	addr = dhd->sssr_reg_info.pcie_regs.base_regs.ltrstate;
12038 	val = LTR_ACTIVE;
12039 	dhd_sbreg_op(dhd, addr, &val, FALSE);
12040 
12041 	val = LTR_SLEEP;
12042 	dhd_sbreg_op(dhd, addr, &val, FALSE);
12043 
12044 	return BCME_OK;
12045 }
12046 
12047 static int
12048 dhdpcie_clear_clk_req(dhd_pub_t *dhd)
12049 {
12050 	DHD_ERROR(("%s\n", __FUNCTION__));
12051 
12052 	dhdpcie_arm_clear_clk_req(dhd);
12053 
12054 	dhdpcie_d11_clear_clk_req(dhd);
12055 
12056 	dhdpcie_pcie_clear_clk_req(dhd);
12057 
12058 	return BCME_OK;
12059 }
12060 
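/*
 * Re-enable the D11 cores using the AI wrapper reset sequence: assert
 * resetctrl bit 0 to disable the core, step ioctrl through the first
 * two ioctrl_resetseq_val values, clear resetctrl to release the reset,
 * then apply the remaining three ioctrl values.
 */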
12061 static int
12062 dhdpcie_bring_d11_outofreset(dhd_pub_t *dhd)
12063 {
12064 	int i;
12065 	uint addr;
12066 	uint val = 0;
12067 
12068 	DHD_ERROR(("%s\n", __FUNCTION__));
12069 
12070 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
12071 		if (dhd->sssr_d11_outofreset[i]) {
12072 			/* disable core by setting bit 0 */
12073 			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
12074 			val = 1;
12075 			dhd_sbreg_op(dhd, addr, &val, FALSE);
12076 			OSL_DELAY(6000);
12077 
12078 			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
12079 			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0];
12080 			dhd_sbreg_op(dhd, addr, &val, FALSE);
12081 
12082 			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1];
12083 			dhd_sbreg_op(dhd, addr, &val, FALSE);
12084 
12085 			/* enable core by clearing bit 0 */
12086 			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
12087 			val = 0;
12088 			dhd_sbreg_op(dhd, addr, &val, FALSE);
12089 
12090 			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
12091 			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2];
12092 			dhd_sbreg_op(dhd, addr, &val, FALSE);
12093 
12094 			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3];
12095 			dhd_sbreg_op(dhd, addr, &val, FALSE);
12096 
12097 			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4];
12098 			dhd_sbreg_op(dhd, addr, &val, FALSE);
12099 		}
12100 	}
12101 	return BCME_OK;
12102 }
12103 
12104 static int
12105 dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd)
12106 {
12107 	int i;
12108 
12109 	DHD_ERROR(("%s\n", __FUNCTION__));
12110 
12111 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
12112 		if (dhd->sssr_d11_outofreset[i]) {
12113 			dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_before[i],
12114 				dhd->sssr_reg_info.mac_regs[i].sr_size,
12115 				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
12116 				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
12117 		}
12118 	}
12119 
12120 	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
12121 		dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
12122 			dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
12123 			dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
12124 	} else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
12125 		dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
12126 		dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
12127 			dhd->sssr_reg_info.dig_mem_info.dig_sr_size,
12128 			dhd->sssr_reg_info.dig_mem_info.dig_sr_addr);
12129 	}
12130 
12131 	return BCME_OK;
12132 }
12133 
12134 static int
12135 dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd)
12136 {
12137 	int i;
12138 
12139 	DHD_ERROR(("%s\n", __FUNCTION__));
12140 
12141 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
12142 		if (dhd->sssr_d11_outofreset[i]) {
12143 			dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_after[i],
12144 				dhd->sssr_reg_info.mac_regs[i].sr_size,
12145 				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
12146 				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
12147 		}
12148 	}
12149 
12150 	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
12151 		dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after,
12152 			dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
12153 			dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
12154 	} else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
12155 		dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
12156 		dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after,
12157 			dhd->sssr_reg_info.dig_mem_info.dig_sr_size,
12158 			dhd->sssr_reg_info.dig_mem_info.dig_sr_addr);
12159 	}
12160 
12161 	return BCME_OK;
12162 }
12163 
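/*
 * Top-level SSSR collection: verify which cores are out of reset,
 * capture the "before" image, quiesce the chip (clear interrupt masks
 * and timers, drop the chipcommon PowerCtrl bits, clear clock requests,
 * send LTR sleep) so save/restore can occur, then restore state, bring
 * the D11 cores back out of reset and capture the "after" image.
 */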
12164 int
12165 dhdpcie_sssr_dump(dhd_pub_t *dhd)
12166 {
12167 	uint32 powerctrl_val;
12168 
12169 	if (!dhd->sssr_inited) {
12170 		DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
12171 		return BCME_ERROR;
12172 	}
12173 
12174 	if (dhd->bus->is_linkdown) {
12175 		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
12176 		return BCME_ERROR;
12177 	}
12178 
12179 	dhdpcie_d11_check_outofreset(dhd);
12180 
12181 	DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__));
12182 	if (dhdpcie_sssr_dump_get_before_sr(dhd) != BCME_OK) {
12183 		DHD_ERROR(("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__));
12184 		return BCME_ERROR;
12185 	}
12186 
12187 	dhdpcie_clear_intmask_and_timer(dhd);
12188 	powerctrl_val = dhdpcie_suspend_chipcommon_powerctrl(dhd);
12189 	dhdpcie_clear_clk_req(dhd);
12190 	dhdpcie_pcie_send_ltrsleep(dhd);
12191 
12192 	/* Wait for some time before Restore */
12193 	OSL_DELAY(6000);
12194 
12195 	dhdpcie_resume_chipcommon_powerctrl(dhd, powerctrl_val);
12196 	dhdpcie_bring_d11_outofreset(dhd);
12197 
12198 	DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
12199 	if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
12200 		DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
12201 		return BCME_ERROR;
12202 	}
12203 	dhd->sssr_dump_collected = TRUE;
12204 	dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_SSSR);
12205 
12206 	return BCME_OK;
12207 }
12208 
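/*
 * Kick off a FIS capture by setting DAR_FIS_START_MASK in the DAR FIS
 * control register, then allow 100 ms for the hardware to complete.
 */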
12209 static int
12210 dhdpcie_fis_trigger(dhd_pub_t *dhd)
12211 {
12212 	if (!dhd->sssr_inited) {
12213 		DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
12214 		return BCME_ERROR;
12215 	}
12216 
12217 	if (dhd->bus->is_linkdown) {
12218 		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
12219 		return BCME_ERROR;
12220 	}
12221 
12222 	/* Trigger FIS */
12223 	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
12224 		DAR_FIS_CTRL(dhd->bus->sih->buscorerev), ~0, DAR_FIS_START_MASK);
12225 	OSL_DELAY(100 * 1000);
12226 
12227 	return BCME_OK;
12228 }
12229 
12230 int
12231 dhd_bus_fis_trigger(dhd_pub_t *dhd)
12232 {
12233 	return dhdpcie_fis_trigger(dhd);
12234 }
12235 
12236 static int
12237 dhdpcie_fis_dump(dhd_pub_t *dhd)
12238 {
12239 	int i;
12240 
12241 	if (!dhd->sssr_inited) {
12242 		DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
12243 		return BCME_ERROR;
12244 	}
12245 
12246 	if (dhd->bus->is_linkdown) {
12247 		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
12248 		return BCME_ERROR;
12249 	}
12250 
12251 	/* bring up all pmu resources */
12252 	PMU_REG(dhd->bus->sih, min_res_mask, ~0,
12253 		PMU_REG(dhd->bus->sih, max_res_mask, 0, 0));
12254 	OSL_DELAY(10 * 1000);
12255 
12256 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
12257 		dhd->sssr_d11_outofreset[i] = TRUE;
12258 	}
12259 
12260 	dhdpcie_bring_d11_outofreset(dhd);
12261 	OSL_DELAY(6000);
12262 
12263 	/* clear FIS Done */
12264 	PMU_REG(dhd->bus->sih, fis_ctrl_status, PMU_CLEAR_FIS_DONE_MASK, PMU_CLEAR_FIS_DONE_MASK);
12265 
12266 	dhdpcie_d11_check_outofreset(dhd);
12267 
12268 	DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
12269 	if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
12270 		DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
12271 		return BCME_ERROR;
12272 	}
12273 
12274 	dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_FIS);
12275 
12276 	return BCME_OK;
12277 }
12278 
12279 int
12280 dhd_bus_fis_dump(dhd_pub_t *dhd)
12281 {
12282 	return dhdpcie_fis_dump(dhd);
12283 }
12284 #endif /* DHD_SSSR_DUMP */
12285 
12286 #ifdef DHD_WAKE_STATUS
12287 wake_counts_t*
12288 dhd_bus_get_wakecount(dhd_pub_t *dhd)
12289 {
12290 	return &dhd->bus->wake_counts;
12291 }
12292 int
12293 dhd_bus_get_bus_wake(dhd_pub_t *dhd)
12294 {
12295 	return bcmpcie_set_get_wake(dhd->bus, 0);
12296 }
12297 #endif /* DHD_WAKE_STATUS */
12298 
12299 /* Writes random number(s) to the TCM. FW, upon initialization, reads this location
12300  * to fetch the random numbers and uses them to randomize the heap address space layout.
12301  */
12302 static int
12303 dhdpcie_wrt_rnd(struct dhd_bus *bus)
12304 {
12305 	bcm_rand_metadata_t rnd_data;
12306 	uint8 rand_buf[BCM_ENTROPY_HOST_NBYTES];
12307 	uint32 count = BCM_ENTROPY_HOST_NBYTES;
12308 	int ret = 0;
12309 	uint32 addr = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM) -
12310 		((bus->nvram_csm & 0xffff) * BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data));
12311 
12312 	memset(rand_buf, 0, BCM_ENTROPY_HOST_NBYTES);
12313 	rnd_data.signature = htol32(BCM_NVRAM_RNG_SIGNATURE);
12314 	rnd_data.count = htol32(count);
12315 	/* write the metadata about random number */
12316 	dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rnd_data, sizeof(rnd_data));
12317 	/* scale back by number of random number counts */
12318 	/* move the write address down by 'count' random-number bytes */
12319 
12320 #ifdef DHD_RND_DEBUG
12321 	bus->dhd->rnd_buf = NULL;
12322 	/* get random contents from file */
12323 	ret = dhd_get_rnd_info(bus->dhd);
12324 	if (bus->dhd->rnd_buf) {
12325 		/* write file contents to TCM */
12326 		DHD_ERROR(("%s: use stored .rnd.in content\n", __FUNCTION__));
12327 		dhdpcie_bus_membytes(bus, TRUE, addr, bus->dhd->rnd_buf, bus->dhd->rnd_len);
12328 
12329 		/* Dump random content to out file */
12330 		dhd_dump_rnd_info(bus->dhd, bus->dhd->rnd_buf, bus->dhd->rnd_len);
12331 
12332 		/* bus->dhd->rnd_buf is allocated in dhd_get_rnd_info, free here */
12333 		MFREE(bus->dhd->osh, bus->dhd->rnd_buf, bus->dhd->rnd_len);
12334 		bus->dhd->rnd_buf = NULL;
12335 		return BCME_OK;
12336 	}
12337 #endif /* DHD_RND_DEBUG */
12338 
12339 	/* Now get & write the random number(s) */
12340 	ret = dhd_get_random_bytes(rand_buf, count);
12341 	if (ret != BCME_OK) {
12342 		return ret;
12343 	}
12344 	dhdpcie_bus_membytes(bus, TRUE, addr, rand_buf, count);
12345 
12346 #ifdef DHD_RND_DEBUG
12347 	/* Dump random content to out file */
12348 	dhd_dump_rnd_info(bus->dhd, rand_buf, count);
12349 #endif /* DHD_RND_DEBUG */
12350 
12351 	return BCME_OK;
12352 }
12353 
12354 void
12355 dhd_pcie_intr_count_dump(dhd_pub_t *dhd)
12356 {
12357 	struct dhd_bus *bus = dhd->bus;
12358 	uint64 current_time;
12359 
12360 	DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters  ------- \r\n"));
12361 	DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n",
12362 		bus->resume_intr_enable_count, bus->dpc_intr_enable_count));
12363 	DHD_ERROR(("isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n",
12364 		bus->isr_intr_disable_count, bus->suspend_intr_disable_count));
12365 #ifdef BCMPCIE_OOB_HOST_WAKE
12366 	DHD_ERROR(("oob_intr_count=%lu oob_intr_enable_count=%lu oob_intr_disable_count=%lu\n",
12367 		bus->oob_intr_count, bus->oob_intr_enable_count,
12368 		bus->oob_intr_disable_count));
12369 	DHD_ERROR(("oob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT"\n",
12370 		dhdpcie_get_oob_irq_num(bus),
12371 		GET_SEC_USEC(bus->last_oob_irq_time)));
12372 	DHD_ERROR(("last_oob_irq_enable_time="SEC_USEC_FMT
12373 		" last_oob_irq_disable_time="SEC_USEC_FMT"\n",
12374 		GET_SEC_USEC(bus->last_oob_irq_enable_time),
12375 		GET_SEC_USEC(bus->last_oob_irq_disable_time)));
12376 	DHD_ERROR(("oob_irq_enabled=%d oob_gpio_level=%d\n",
12377 		dhdpcie_get_oob_irq_status(bus),
12378 		dhdpcie_get_oob_irq_level()));
12379 #endif /* BCMPCIE_OOB_HOST_WAKE */
12380 	DHD_ERROR(("dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
12381 		bus->dpc_return_busdown_count, bus->non_ours_irq_count));
12382 
12383 	current_time = OSL_LOCALTIME_NS();
12384 	DHD_ERROR(("\ncurrent_time="SEC_USEC_FMT"\n",
12385 		GET_SEC_USEC(current_time)));
12386 	DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
12387 		" isr_exit_time="SEC_USEC_FMT"\n",
12388 		GET_SEC_USEC(bus->isr_entry_time),
12389 		GET_SEC_USEC(bus->isr_exit_time)));
12390 	DHD_ERROR(("dpc_sched_time="SEC_USEC_FMT
12391 		" last_non_ours_irq_time="SEC_USEC_FMT"\n",
12392 		GET_SEC_USEC(bus->dpc_sched_time),
12393 		GET_SEC_USEC(bus->last_non_ours_irq_time)));
12394 	DHD_ERROR(("dpc_entry_time="SEC_USEC_FMT
12395 		" last_process_ctrlbuf_time="SEC_USEC_FMT"\n",
12396 		GET_SEC_USEC(bus->dpc_entry_time),
12397 		GET_SEC_USEC(bus->last_process_ctrlbuf_time)));
12398 	DHD_ERROR(("last_process_flowring_time="SEC_USEC_FMT
12399 		" last_process_txcpl_time="SEC_USEC_FMT"\n",
12400 		GET_SEC_USEC(bus->last_process_flowring_time),
12401 		GET_SEC_USEC(bus->last_process_txcpl_time)));
12402 	DHD_ERROR(("last_process_rxcpl_time="SEC_USEC_FMT
12403 		" last_process_infocpl_time="SEC_USEC_FMT
12404 		" last_process_edl_time="SEC_USEC_FMT"\n",
12405 		GET_SEC_USEC(bus->last_process_rxcpl_time),
12406 		GET_SEC_USEC(bus->last_process_infocpl_time),
12407 		GET_SEC_USEC(bus->last_process_edl_time)));
12408 	DHD_ERROR(("dpc_exit_time="SEC_USEC_FMT
12409 		" resched_dpc_time="SEC_USEC_FMT"\n",
12410 		GET_SEC_USEC(bus->dpc_exit_time),
12411 		GET_SEC_USEC(bus->resched_dpc_time)));
12412 	DHD_ERROR(("last_d3_inform_time="SEC_USEC_FMT"\n",
12413 		GET_SEC_USEC(bus->last_d3_inform_time)));
12414 
12415 	DHD_ERROR(("\nlast_suspend_start_time="SEC_USEC_FMT
12416 		" last_suspend_end_time="SEC_USEC_FMT"\n",
12417 		GET_SEC_USEC(bus->last_suspend_start_time),
12418 		GET_SEC_USEC(bus->last_suspend_end_time)));
12419 	DHD_ERROR(("last_resume_start_time="SEC_USEC_FMT
12420 		" last_resume_end_time="SEC_USEC_FMT"\n",
12421 		GET_SEC_USEC(bus->last_resume_start_time),
12422 		GET_SEC_USEC(bus->last_resume_end_time)));
12423 
12424 #if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
12425 	DHD_ERROR(("logtrace_thread_entry_time="SEC_USEC_FMT
12426 		" logtrace_thread_sem_down_time="SEC_USEC_FMT
12427 		"\nlogtrace_thread_flush_time="SEC_USEC_FMT
12428 		" logtrace_thread_unexpected_break_time="SEC_USEC_FMT
12429 		"\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
12430 		GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
12431 		GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
12432 		GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
12433 		GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
12434 		GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time)));
12435 #endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
12436 }
12437 
12438 void
12439 dhd_bus_intr_count_dump(dhd_pub_t *dhd)
12440 {
12441 	dhd_pcie_intr_count_dump(dhd);
12442 }
12443 
12444 int
12445 dhd_pcie_dump_wrapper_regs(dhd_pub_t *dhd)
12446 {
12447 	uint32 save_idx, val;
12448 	si_t *sih = dhd->bus->sih;
12449 	uint32 oob_base, oob_base1;
12450 	uint32 wrapper_dump_list[] = {
12451 		AI_OOBSELOUTA30, AI_OOBSELOUTA74, AI_OOBSELOUTB30, AI_OOBSELOUTB74,
12452 		AI_OOBSELOUTC30, AI_OOBSELOUTC74, AI_OOBSELOUTD30, AI_OOBSELOUTD74,
12453 		AI_RESETSTATUS, AI_RESETCTRL,
12454 		AI_ITIPOOBA, AI_ITIPOOBB, AI_ITIPOOBC, AI_ITIPOOBD,
12455 		AI_ITIPOOBAOUT, AI_ITIPOOBBOUT, AI_ITIPOOBCOUT, AI_ITIPOOBDOUT
12456 	};
12457 	uint8 i;
12458 	hndoobr_reg_t *reg;
12459 	cr4regs_t *cr4regs;
12460 	ca7regs_t *ca7regs;
12461 
12462 	save_idx = si_coreidx(sih);
12463 
12464 	DHD_ERROR(("%s: Master wrapper Reg\n", __FUNCTION__));
12465 
12466 	if (si_setcore(sih, PCIE2_CORE_ID, 0) != NULL) {
12467 		for (i = 0; i < sizeof(wrapper_dump_list) / 4; i++) {
12468 			val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
12469 			DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
12470 		}
12471 	}
12472 
12473 	if ((cr4regs = si_setcore(sih, ARMCR4_CORE_ID, 0)) != NULL) {
12474 		DHD_ERROR(("%s: ARM CR4 wrapper Reg\n", __FUNCTION__));
12475 		for (i = 0; i < sizeof(wrapper_dump_list) / 4; i++) {
12476 			val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
12477 			DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
12478 		}
12479 		DHD_ERROR(("%s: ARM CR4 core Reg\n", __FUNCTION__));
12480 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecontrol));
12481 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corecontrol), val));
12482 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecapabilities));
12483 		DHD_ERROR(("reg:0x%x val:0x%x\n",
12484 			(uint)OFFSETOF(cr4regs_t, corecapabilities), val));
12485 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corestatus));
12486 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corestatus), val));
12487 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmiisrst));
12488 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmiisrst), val));
12489 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmimask));
12490 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmimask), val));
12491 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, isrmask));
12492 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, isrmask), val));
12493 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, swintreg));
12494 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, swintreg), val));
12495 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, intstatus));
12496 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, intstatus), val));
12497 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, cyclecnt));
12498 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, cyclecnt), val));
12499 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, inttimer));
12500 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, inttimer), val));
12501 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, clk_ctl_st));
12502 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, clk_ctl_st), val));
12503 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, powerctl));
12504 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, powerctl), val));
12505 	}
12506 
12507 	if ((ca7regs = si_setcore(sih, ARMCA7_CORE_ID, 0)) != NULL) {
12508 		DHD_ERROR(("%s: ARM CA7 core Reg\n", __FUNCTION__));
12509 		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecontrol));
12510 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corecontrol), val));
12511 		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecapabilities));
12512 		DHD_ERROR(("reg:0x%x val:0x%x\n",
12513 			(uint)OFFSETOF(ca7regs_t, corecapabilities), val));
12514 		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corestatus));
12515 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corestatus), val));
12516 		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, tracecontrol));
12517 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, tracecontrol), val));
12518 		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, clk_ctl_st));
12519 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, clk_ctl_st), val));
12520 		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, powerctl));
12521 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, powerctl), val));
12522 	}
12523 
12524 	DHD_ERROR(("%s: OOBR Reg\n", __FUNCTION__));
12525 
12526 	oob_base = si_oobr_baseaddr(sih, FALSE);
12527 	oob_base1 = si_oobr_baseaddr(sih, TRUE);
12528 	if (oob_base) {
12529 		dhd_sbreg_op(dhd, oob_base + OOB_STATUSA, &val, TRUE);
12530 		dhd_sbreg_op(dhd, oob_base + OOB_STATUSB, &val, TRUE);
12531 		dhd_sbreg_op(dhd, oob_base + OOB_STATUSC, &val, TRUE);
12532 		dhd_sbreg_op(dhd, oob_base + OOB_STATUSD, &val, TRUE);
12533 	} else if ((reg = si_setcore(sih, HND_OOBR_CORE_ID, 0)) != NULL) {
12534 		val = R_REG(dhd->osh, &reg->intstatus[0]);
12535 		DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
12536 		val = R_REG(dhd->osh, &reg->intstatus[1]);
12537 		DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
12538 		val = R_REG(dhd->osh, &reg->intstatus[2]);
12539 		DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
12540 		val = R_REG(dhd->osh, &reg->intstatus[3]);
12541 		DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
12542 	}
12543 
12544 	if (oob_base1) {
12545 		DHD_ERROR(("%s: Second OOBR Reg\n", __FUNCTION__));
12546 
12547 		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSA, &val, TRUE);
12548 		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSB, &val, TRUE);
12549 		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSC, &val, TRUE);
12550 		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSD, &val, TRUE);
12551 	}
12552 
12553 	si_setcoreidx(dhd->bus->sih, save_idx);
12554 
12555 	return 0;
12556 }
12557 
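/*
 * Dump the PCIe core DMA engine registers. The raw offsets used below
 * (0x200-0x274) appear to correspond to the HostToDev and DevToHost
 * transmit and receive channels (XmtCtrl/XmtPtr/XmtAddr/XmtStatus and
 * their Rcv counterparts), as labelled in the log strings.
 */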
12558 int
12559 dhd_pcie_dma_info_dump(dhd_pub_t *dhd)
12560 {
12561 	if (dhd->bus->is_linkdown) {
12562 		DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers "
12563 			"due to PCIe link down ------- \r\n"));
12564 		return 0;
12565 	}
12566 
12567 	DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n"));
12568 
12569 	//HostToDev
12570 	DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
12571 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0),
12572 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0)));
12573 	DHD_ERROR(("            : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
12574 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0),
12575 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0)));
12576 	DHD_ERROR(("            : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
12577 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0),
12578 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0)));
12579 
12580 	DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
12581 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0),
12582 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0)));
12583 	DHD_ERROR(("            : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
12584 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0),
12585 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0)));
12586 	DHD_ERROR(("            : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
12587 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0),
12588 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0)));
12589 
12590 	//DevToHost
12591 	DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
12592 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0),
12593 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0)));
12594 	DHD_ERROR(("            : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
12595 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0),
12596 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0)));
12597 	DHD_ERROR(("            : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
12598 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0),
12599 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0)));
12600 
12601 	DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
12602 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0),
12603 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0)));
12604 	DHD_ERROR(("            : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
12605 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0),
12606 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0)));
12607 	DHD_ERROR(("            : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
12608 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0),
12609 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0)));
12610 
12611 	return 0;
12612 }
12613 
12614 bool
12615 dhd_pcie_dump_int_regs(dhd_pub_t *dhd)
12616 {
12617 	uint32 intstatus = 0;
12618 	uint32 intmask = 0;
12619 	uint32 d2h_db0 = 0;
12620 	uint32 d2h_mb_data = 0;
12621 
12622 	DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
12623 	intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
12624 		dhd->bus->pcie_mailbox_int, 0, 0);
12625 	if (intstatus == (uint32)-1) {
12626 		DHD_ERROR(("intstatus=0x%x \n", intstatus));
12627 		return FALSE;
12628 	}
12629 
12630 	intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
12631 		dhd->bus->pcie_mailbox_mask, 0, 0);
12632 	if (intmask == (uint32) -1) {
12633 		DHD_ERROR(("intstatus=0x%x intmask=0x%x \n", intstatus, intmask));
12634 		return FALSE;
12635 	}
12636 
12637 	d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
12638 		PCID2H_MailBox, 0, 0);
12639 	if (d2h_db0 == (uint32)-1) {
12640 		DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
12641 		intstatus, intmask, d2h_db0));
12642 		return FALSE;
12643 	}
12644 
12645 	DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
12646 		intstatus, intmask, d2h_db0));
12647 	dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
12648 	DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
12649 		dhd->bus->def_intmask));
12650 
12651 	return TRUE;
12652 }
12653 
12654 void
12655 dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd)
12656 {
12657 	DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n"));
12658 	DHD_ERROR(("Pcie RC Uncorrectable Error Status Val=0x%x\n",
12659 		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
12660 		PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
12661 #ifdef EXTENDED_PCIE_DEBUG_DUMP
12662 	DHD_ERROR(("hdrlog0 =0x%08x hdrlog1 =0x%08x hdrlog2 =0x%08x hdrlog3 =0x%08x\n",
12663 		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
12664 		PCIE_EXTCAP_ERR_HEADER_LOG_0, TRUE, FALSE, 0),
12665 		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
12666 		PCIE_EXTCAP_ERR_HEADER_LOG_1, TRUE, FALSE, 0),
12667 		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
12668 		PCIE_EXTCAP_ERR_HEADER_LOG_2, TRUE, FALSE, 0),
12669 		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
12670 		PCIE_EXTCAP_ERR_HEADER_LOG_3, TRUE, FALSE, 0)));
12671 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
12672 }
12673 
12674 int
12675 dhd_pcie_debug_info_dump(dhd_pub_t *dhd)
12676 {
12677 	int host_irq_disabled;
12678 
12679 	DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state));
12680 	host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
12681 	DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled));
12682 	dhd_print_tasklet_status(dhd);
12683 	dhd_pcie_intr_count_dump(dhd);
12684 
12685 	DHD_ERROR(("\n ------- DUMPING PCIE EP Resource Info ------- \r\n"));
12686 	dhdpcie_dump_resource(dhd->bus);
12687 
12688 	dhd_pcie_dump_rc_conf_space_cap(dhd);
12689 
12690 	DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
12691 		dhd_debug_get_rc_linkcap(dhd->bus)));
12692 
12693 	if (dhd->bus->is_linkdown && !dhd->bus->cto_triggered) {
12694 		DHD_ERROR(("Skip dumping the PCIe Config and Core registers. "
12695 			"Link may be DOWN\n"));
12696 		return 0;
12697 	}
12698 
12699 	DHD_ERROR(("\n ------- DUMPING PCIE EP config space Registers ------- \r\n"));
12700 	DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x BaseAddress1(0x%x)=0x%x "
12701 		"PCIE_CFG_PMCSR(0x%x)=0x%x\n",
12702 		PCIECFGREG_STATUS_CMD,
12703 		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD, sizeof(uint32)),
12704 		PCIECFGREG_BASEADDR0,
12705 		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)),
12706 		PCIECFGREG_BASEADDR1,
12707 		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32)),
12708 		PCIE_CFG_PMCSR,
12709 		dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_PMCSR, sizeof(uint32))));
12710 	DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x "
12711 		"L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL,
12712 		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL,
12713 		sizeof(uint32)), PCIECFGGEN_DEV_STATUS_CTRL2,
12714 		dhd_pcie_config_read(dhd->bus->osh, PCIECFGGEN_DEV_STATUS_CTRL2,
12715 		sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL1,
12716 		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1,
12717 		sizeof(uint32))));
12718 #ifdef EXTENDED_PCIE_DEBUG_DUMP
12719 	DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n",
12720 		dhdpcie_ep_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
12721 		PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
12722 	DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x "
12723 		"hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1,
12724 		dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG1, sizeof(uint32)),
12725 		PCI_TLP_HDR_LOG2,
12726 		dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG2, sizeof(uint32)),
12727 		PCI_TLP_HDR_LOG3,
12728 		dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG3, sizeof(uint32)),
12729 		PCI_TLP_HDR_LOG4,
12730 		dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG4, sizeof(uint32))));
12731 	if (dhd->bus->sih->buscorerev >= 24) {
12732 		DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x "
12733 			"L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL,
12734 			dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_DEV_STATUS_CTRL,
12735 			sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL,
12736 			dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL,
12737 			sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2,
12738 			dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL2,
12739 			sizeof(uint32))));
12740 		dhd_bus_dump_dar_registers(dhd->bus);
12741 	}
12742 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
12743 
12744 	if (dhd->bus->is_linkdown) {
12745 		DHD_ERROR(("Skip dumping the PCIe Core registers. Link may be DOWN\n"));
12746 		return 0;
12747 	}
12748 
12749 	DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n"));
12750 
12751 	DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
12752 		"ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0,
12753 		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0),
12754 		PCIECFGREG_PHY_DBG_CLKREQ1,
12755 		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1),
12756 		PCIECFGREG_PHY_DBG_CLKREQ2,
12757 		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2),
12758 		PCIECFGREG_PHY_DBG_CLKREQ3,
12759 		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3)));
12760 
#ifdef EXTENDED_PCIE_DEBUG_DUMP
	if (dhd->bus->sih->buscorerev >= 24) {
		DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x "
			"ltssm_hist_2(0x%x)=0x%x "
			"ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0),
			PCIECFGREG_PHY_LTSSM_HIST_1,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1),
			PCIECFGREG_PHY_LTSSM_HIST_2,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2),
			PCIECFGREG_PHY_LTSSM_HIST_3,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3)));

		DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n",
			PCIECFGREG_TREFUP,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP),
			PCIECFGREG_TREFUP_EXT,
			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT)));
		DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x "
			"Function_Intstatus(0x%x)=0x%x "
			"Function_Intmask(0x%x)=0x%x Power_Intstatus(0x%x)=0x%x "
			"Power_Intmask(0x%x)=0x%x\n",
			PCIE_CORE_REG_ERRLOG,
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIE_CORE_REG_ERRLOG, 0, 0),
			PCIE_CORE_REG_ERR_ADDR,
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIE_CORE_REG_ERR_ADDR, 0, 0),
			PCIFunctionIntstatus(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0),
			PCIFunctionIntmask(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIFunctionIntmask(dhd->bus->sih->buscorerev), 0, 0),
			PCIPowerIntstatus(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIPowerIntstatus(dhd->bus->sih->buscorerev), 0, 0),
			PCIPowerIntmask(dhd->bus->sih->buscorerev),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				PCIPowerIntmask(dhd->bus->sih->buscorerev), 0, 0)));
		DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x "
			"err_hdrlog3(0x%x)=0x%x err_hdrlog4(0x%x)=0x%x\n",
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0),
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0),
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0),
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0)));
		DHD_ERROR(("err_code(0x%x)=0x%x\n",
			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg),
			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
				OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), 0, 0)));

		dhd_pcie_dump_wrapper_regs(dhd);
	}
#endif /* EXTENDED_PCIE_DEBUG_DUMP */

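	/* Also dump the bus DMA engine information. */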
	dhd_pcie_dma_info_dump(dhd);

	return 0;
}

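/* Accessor: report whether the force-BT-quiesce option is enabled for this bus. */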
bool
dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus)
{
	return bus->force_bt_quiesce;
}

#ifdef DHD_HP2P
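/* Return the configured max item count for the HP2P TX (tx == TRUE) or RX completion ring. */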
uint16
dhd_bus_get_hp2p_ring_max_size(struct dhd_bus *bus, bool tx)
{
	if (tx)
		return bus->hp2p_txcpl_max_items;
	else
		return bus->hp2p_rxcpl_max_items;
}

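/* Set the max item count for the HP2P TX or RX completion ring and return the value written. */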
static uint16
dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val)
{
	if (tx)
		bus->hp2p_txcpl_max_items = val;
	else
		bus->hp2p_rxcpl_max_items = val;
	return val;
}
#endif /* DHD_HP2P */

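/*
 * Sanity-test the dongle TCM (RAM): for each test pattern (all ones, all
 * zeroes), write the pattern across the full RAM in MEMBLOCK-sized chunks,
 * read each chunk back, and compare. Returns TRUE only if every chunk
 * matches for every pattern; on any mismatch the read and write buffers
 * are hex-dumped and FALSE is returned.
 */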
static bool
dhd_bus_tcm_test(struct dhd_bus *bus)
{
	int ret = 0;
	int size; /* Full mem size */
	int start; /* Start address */
	int read_size = 0; /* Read size of each iteration */
	int num = 0;
	uint8 *read_buf, *write_buf;
	uint8 init_val[NUM_PATTERNS] = {
		0xFFu, /* 11111111 */
		0x00u, /* 00000000 */
	};

	if (!bus) {
		DHD_ERROR(("%s: bus is NULL !\n", __FUNCTION__));
		return FALSE;
	}

	read_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
	if (!read_buf) {
		DHD_ERROR(("%s: MALLOC of read_buf failed\n", __FUNCTION__));
		return FALSE;
	}

	write_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
	if (!write_buf) {
		MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
		DHD_ERROR(("%s: MALLOC of write_buf failed\n", __FUNCTION__));
		return FALSE;
	}

	DHD_ERROR(("%s: start %x, size: %x\n", __FUNCTION__, bus->dongle_ram_base, bus->ramsize));
	DHD_ERROR(("%s: memblock size %d, #pattern %d\n", __FUNCTION__, MEMBLOCK, NUM_PATTERNS));

	while (num < NUM_PATTERNS) {
		start = bus->dongle_ram_base;
		/* Get full mem size */
		size = bus->ramsize;

		memset(write_buf, init_val[num], MEMBLOCK);
		while (size > 0) {
			read_size = MIN(MEMBLOCK, size);
			memset(read_buf, 0, read_size);

			/* Write */
			if ((ret = dhdpcie_bus_membytes(bus, TRUE, start, write_buf, read_size))) {
				DHD_ERROR(("%s: Write Error membytes %d\n", __FUNCTION__, ret));
				MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
				MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
				return FALSE;
			}

			/* Read */
			if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, read_buf, read_size))) {
				DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret));
				MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
				MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
				return FALSE;
			}

			/* Compare */
			if (memcmp(read_buf, write_buf, read_size)) {
				DHD_ERROR(("%s: Mismatch at %x, iter : %d\n",
					__FUNCTION__, start, num));
				prhex("Readbuf", read_buf, read_size);
				prhex("Writebuf", write_buf, read_size);
				MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
				MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
				return FALSE;
			}

			/* Decrement size and increment start address */
			size -= read_size;
			start += read_size;
		}
		num++;
	}

	MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
	MFREE(bus->dhd->osh, write_buf, MEMBLOCK);

	DHD_ERROR(("%s: Success iter : %d\n", __FUNCTION__, num));
	return TRUE;
}