xref: /OK3568_Linux_fs/external/rkwifibt/drivers/infineon/dhd_pcie.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * DHD Bus Module for PCIE
3  *
4  * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
5  *
6  * Copyright (C) 1999-2017, Broadcom Corporation
7  *
8  *      Unless you and Broadcom execute a separate written software license
9  * agreement governing use of this software, this software is licensed to you
10  * under the terms of the GNU General Public License version 2 (the "GPL"),
11  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12  * following added to such license:
13  *
14  *      As a special exception, the copyright holders of this software give you
15  * permission to link this software with independent modules, and to copy and
16  * distribute the resulting executable under terms of your choice, provided that
17  * you also meet, for each linked independent module, the terms and conditions of
18  * the license of that module.  An independent module is a module which is not
19  * derived from this software.  The special exception does not apply to any
20  * modifications of the software.
21  *
22  *      Notwithstanding the above, under no circumstances may you combine this
23  * software in any way with any other Broadcom software provided under a license
24  * other than the GPL, without Broadcom's express prior written consent.
25  *
26  *
27  * <<Broadcom-WL-IPTag/Open:>>
28  *
29  * $Id: dhd_pcie.c 702835 2017-06-05 07:19:55Z $
30  */
31 
32 /* include files */
33 #include <typedefs.h>
34 #include <bcmutils.h>
35 #include <bcmdevs.h>
36 #include <siutils.h>
37 #include <sbpcmcia.h>
38 #include <hndoobr.h>
39 #include <hndsoc.h>
40 #include <hndpmu.h>
41 #include <etd.h>
42 #include <hnd_debug.h>
43 #include <sbchipc.h>
44 #include <sbhndarm.h>
45 #include <hnd_armtrap.h>
46 #if defined(DHD_DEBUG)
47 #include <hnd_cons.h>
48 #endif /* defined(DHD_DEBUG) */
49 #include <dngl_stats.h>
50 #include <pcie_core.h>
51 #include <dhd.h>
52 #include <dhd_bus.h>
53 #include <dhd_flowring.h>
54 #include <dhd_proto.h>
55 #include <dhd_dbg.h>
56 #include <dhd_debug.h>
57 #include <dhd_daemon.h>
58 #include <dhdioctl.h>
59 #include <sdiovar.h>
60 #include <bcmmsgbuf.h>
61 #include <pcicfg.h>
62 #include <dhd_pcie.h>
63 #include <bcmpcie.h>
64 #include <bcmendian.h>
65 #include <bcmstdlib_s.h>
66 #ifdef DHDTCPACK_SUPPRESS
67 #include <dhd_ip.h>
68 #endif /* DHDTCPACK_SUPPRESS */
69 #include <bcmevent.h>
70 #include <trxhdr.h>
71 
72 extern uint32 hw_module_variant;
73 #include <pcie_core.h>
74 
75 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
76 #include <linux/pm_runtime.h>
77 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
78 
79 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
80 #include <debugger.h>
81 #endif /* DEBUGGER || DHD_DSCOPE */
82 
83 #ifdef DNGL_AXI_ERROR_LOGGING
84 #include <dhd_linux_wq.h>
85 #include <dhd_linux.h>
86 #endif /* DNGL_AXI_ERROR_LOGGING */
87 
88 #if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
89 #include <dhd_linux_priv.h>
90 #endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
91 
92 #include <otpdefs.h>
93 #define EXTENDED_PCIE_DEBUG_DUMP 1	/* Enable Extended pcie registers dump */
94 
95 #define MEMBLOCK	2048		/* Block size used for downloading of dongle image */
96 #define MAX_WKLK_IDLE_CHECK	3	/* times wake_lock checked before deciding not to suspend */
97 
98 #define	DHD_MAX_ITEMS_HPP_TXCPL_RING	512
99 #define	DHD_MAX_ITEMS_HPP_RXCPL_RING	512
100 
101 #define ARMCR4REG_CORECAP	(0x4/sizeof(uint32))
102 #define ARMCR4REG_MPUCTRL	(0x90/sizeof(uint32))
103 #define ACC_MPU_SHIFT		25
104 #define ACC_MPU_MASK		(0x1u << ACC_MPU_SHIFT)
105 
106 #define ARMCR4REG_BANKIDX	(0x40/sizeof(uint32))
107 #define ARMCR4REG_BANKPDA	(0x4C/sizeof(uint32))
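
/*
 * Editor's note (not in the original source): the 0xNN/sizeof(uint32)
 * idiom above turns byte offsets into uint32-word offsets, so the
 * constants can be added directly to a (volatile uint32 *) register base,
 * as dhdpcie_bus_mpu_disable() does later in this file:
 *
 *	R_REG(bus->osh, cr4_regs + ARMCR4REG_CORECAP)
 */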
108 /* Temporary WAR to fix precommit till the sync issue between trunk & precommit branches is resolved */
109 
110 /* CTO Prevention Recovery */
111 #ifdef BCMQT_HW
112 #define CTO_TO_CLEAR_WAIT_MS 10000
113 #define CTO_TO_CLEAR_WAIT_MAX_CNT 100
114 #else
115 #define CTO_TO_CLEAR_WAIT_MS 1000
116 #define CTO_TO_CLEAR_WAIT_MAX_CNT 10
117 #endif // endif
118 
119 /* Fetch address of a member in the pciedev_shared structure in dongle memory */
120 #define DHD_PCIE_SHARED_MEMBER_ADDR(bus, member) \
121 	(bus)->shared_addr + OFFSETOF(pciedev_shared_t, member)
122 
123 /* Fetch address of a member in rings_info_ptr structure in dongle memory */
124 #define DHD_RING_INFO_MEMBER_ADDR(bus, member) \
125 	(bus)->pcie_sh->rings_info_ptr + OFFSETOF(ring_info_t, member)
126 
127 /* Fetch address of a member in the ring_mem structure in dongle memory */
128 #define DHD_RING_MEM_MEMBER_ADDR(bus, ringid, member) \
129 	(bus)->ring_sh[ringid].ring_mem_addr + OFFSETOF(ring_mem_t, member)
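
/*
 * Editor's sketch (illustrative, not part of the original source): these
 * macros yield dongle-memory addresses that are consumed by the TCM
 * accessors declared below, e.g. reading one shared-structure member
 * (the member name here is only an example):
 *
 *	uint32 val;
 *	ulong addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, console_addr);
 *	dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&val, sizeof(val));
 */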
130 
131 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
132 	extern unsigned int system_rev;
133 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
134 
135 /* DHD module parameter */
136 extern uint32 hw_module_variant;
137 
138 #ifdef EWP_EDL
139 extern int host_edl_support;
140 #endif // endif
141 
142 #define D2H_HS_START_STATE (1 << D2H_START_SHIFT)
143 #define D2H_HS_READY_STATE (1 << D2H_START_SHIFT | 1 << D2H_READY_SHIFT)
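
/*
 * Editor's worked example (assuming D2H_START_SHIFT == 0 and
 * D2H_READY_SHIFT == 1; the real shift values come from the bcmpcie
 * headers): START would read as 0x1 and READY as 0x3. By construction,
 * READY always keeps the START bit set. dhdpcie_dongle_attach() below
 * compares the raw d2h_msg_reg0 value against these composite states.
 */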
144 
145 /* This can be overridden by the module parameter (dma_ring_indices) defined in dhd_linux.c */
146 uint dma_ring_indices = 0;
147 /* This can be overridden by the module parameter (h2d_phase) defined in dhd_linux.c */
148 bool h2d_phase = 0;
149 /* This can be overridden by the module parameter (force_trap_bad_h2d_phase)
150  * defined in dhd_linux.c
151  */
152 bool force_trap_bad_h2d_phase = 0;
153 
154 int dhd_dongle_memsize;
155 int dhd_dongle_ramsize;
156 struct dhd_bus *g_dhd_bus = NULL;
157 #ifdef DNGL_AXI_ERROR_LOGGING
158 static void dhd_log_dump_axi_error(uint8 *axi_err);
159 #endif /* DNGL_AXI_ERROR_LOGGING */
160 
161 static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
162 static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
163 #if defined(DHD_FW_COREDUMP)
164 static int dhdpcie_mem_dump(dhd_bus_t *bus);
165 static int dhdpcie_get_mem_dump(dhd_bus_t *bus);
166 #endif /* DHD_FW_COREDUMP */
167 
168 static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
169 static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid,
170 	const char *name, void *params,
171 	int plen, void *arg, int len, int val_size);
172 static int dhdpcie_bus_lpback_req(struct  dhd_bus *bus, uint32 intval);
173 static int dhdpcie_bus_dmaxfer_req(struct  dhd_bus *bus,
174 	uint32 len, uint32 srcdelay, uint32 destdelay,
175 	uint32 d11_lpbk, uint32 core_num, uint32 wait);
176 static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
177 static int dhdpcie_handshake_msg_reg_write(si_t *sih, osl_t *osh, volatile void *addr,
178 	uint *buffer);
179 static int dhdpcie_handshake_msg_reg_read(si_t *sih, osl_t *osh, volatile void *addr,
180 	uint *buffer);
181 static int dhdpcie_dongle_host_handshake_spinwait(si_t *sih, osl_t *osh, volatile void *addr,
182 	uint32 bitshift, uint32 us);
183 static int dhdpcie_dongle_host_get_handshake_address(si_t *sih, osl_t *osh, hs_addrs_t *addr);
184 static int dhdpcie_dongle_host_pre_handshake(si_t *sih, osl_t *osh, hs_addrs_t *addr);
185 static int dhdpcie_dongle_host_post_handshake(si_t *sih, osl_t *osh, hs_addrs_t *addr);
186 static int dhdpcie_dongle_host_chk_validation(si_t *sih, osl_t *osh, hs_addrs_t *addr);
187 int dhdpcie_dongle_host_pre_wd_reset_sequence(si_t *sih, osl_t *osh);
188 int dhdpcie_dongle_host_post_wd_reset_sequence(si_t *sih, osl_t *osh);
189 int dhdpcie_dongle_host_pre_chipid_access_sequence(osl_t *osh, volatile void *regva);
190 static int dhdpcie_dongle_host_post_varswrite(dhd_bus_t *bus, hs_addrs_t *addr);
191 static int _dhdpcie_download_firmware(struct dhd_bus *bus);
192 static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
193 static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
194 static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
195 static bool dhdpci_bus_read_frames(dhd_bus_t *bus);
196 static int dhdpcie_readshared_console(dhd_bus_t *bus);
197 static int dhdpcie_readshared(dhd_bus_t *bus);
198 static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
199 static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
200 static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size);
201 static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
202 	bool dongle_isolation, bool reset_flag);
203 static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
204 static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
205 static void dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr);
206 static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
207 static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
208 static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
209 static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset);
210 static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
211 static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
212 #ifdef DHD_SUPPORT_64BIT
213 static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data) __attribute__ ((used));
214 static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset) __attribute__ ((used));
215 #endif /* DHD_SUPPORT_64BIT */
216 static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
217 static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size);
218 static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
219 static int dhdpcie_sromotp_customvar(dhd_bus_t *bus, uint32 *customvar1, uint32 *customvar2);
220 static void dhdpcie_fw_trap(dhd_bus_t *bus);
221 static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
222 static void dhdpcie_handle_mb_data(dhd_bus_t *bus);
223 extern void dhd_dpc_enable(dhd_pub_t *dhdp);
224 extern void dhd_dpc_kill(dhd_pub_t *dhdp);
225 
226 #ifdef IDLE_TX_FLOW_MGMT
227 static void dhd_bus_check_idle_scan(dhd_bus_t *bus);
228 static void dhd_bus_idle_scan(dhd_bus_t *bus);
229 #endif /* IDLE_TX_FLOW_MGMT */
230 
231 #ifdef EXYNOS_PCIE_DEBUG
232 extern void exynos_pcie_register_dump(int ch_num);
233 #endif /* EXYNOS_PCIE_DEBUG */
234 
235 #if defined(DHD_H2D_LOG_TIME_SYNC)
236 static void dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus);
237 #endif /* DHD_H2D_LOG_TIME_SYNC */
238 
239 #define     PCI_VENDOR_ID_BROADCOM          0x14e4
240 #define     PCI_VENDOR_ID_CYPRESS           0x12be
241 
242 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
243 #define MAX_D3_ACK_TIMEOUT	100
244 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
245 
246 #define DHD_DEFAULT_DOORBELL_TIMEOUT 200	/* ms */
247 static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version);
248 static int dhdpcie_cto_error_recovery(struct dhd_bus *bus);
249 
250 static int dhdpcie_init_d11status(struct dhd_bus *bus);
251 
252 static int dhdpcie_wrt_rnd(struct dhd_bus *bus);
253 
254 extern uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd);
255 extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost);
256 
257 #ifdef DHD_HP2P
258 extern enum hrtimer_restart dhd_hp2p_write(struct hrtimer *timer);
259 static uint16 dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val);
260 #endif // endif
261 #define NUM_PATTERNS 2
262 static bool dhd_bus_tcm_test(struct dhd_bus *bus);
263 
264 /* IOVar table */
265 enum {
266 	IOV_INTR = 1,
267 #ifdef DHD_BUS_MEM_ACCESS
268 	IOV_MEMBYTES,
269 #endif /* DHD_BUS_MEM_ACCESS */
270 	IOV_MEMSIZE,
271 	IOV_SET_DOWNLOAD_STATE,
272 	IOV_DEVRESET,
273 	IOV_VARS,
274 	IOV_MSI_SIM,
275 	IOV_PCIE_LPBK,
276 	IOV_CC_NVMSHADOW,
277 	IOV_RAMSIZE,
278 	IOV_RAMSTART,
279 	IOV_SLEEP_ALLOWED,
280 	IOV_PCIE_DMAXFER,
281 	IOV_PCIE_SUSPEND,
282 #ifdef DHD_PCIE_REG_ACCESS
283 	IOV_PCIEREG,
284 	IOV_PCIECFGREG,
285 	IOV_PCIECOREREG,
286 	IOV_PCIESERDESREG,
287 	IOV_PCIEASPM,
288 	IOV_BAR0_SECWIN_REG,
289 	IOV_SBREG,
290 #endif /* DHD_PCIE_REG_ACCESS */
291 	IOV_DONGLEISOLATION,
292 	IOV_LTRSLEEPON_UNLOOAD,
293 	IOV_METADATA_DBG,
294 	IOV_RX_METADATALEN,
295 	IOV_TX_METADATALEN,
296 	IOV_TXP_THRESHOLD,
297 	IOV_BUZZZ_DUMP,
298 	IOV_DUMP_RINGUPD_BLOCK,
299 	IOV_DMA_RINGINDICES,
300 	IOV_FORCE_FW_TRAP,
301 	IOV_DB1_FOR_MB,
302 	IOV_FLOW_PRIO_MAP,
303 #ifdef DHD_PCIE_RUNTIMEPM
304 	IOV_IDLETIME,
305 #endif /* DHD_PCIE_RUNTIMEPM */
306 	IOV_RXBOUND,
307 	IOV_TXBOUND,
308 	IOV_HANGREPORT,
309 	IOV_H2D_MAILBOXDATA,
310 	IOV_INFORINGS,
311 	IOV_H2D_PHASE,
312 	IOV_H2D_ENABLE_TRAP_BADPHASE,
313 	IOV_H2D_TXPOST_MAX_ITEM,
314 	IOV_TRAPDATA,
315 	IOV_TRAPDATA_RAW,
316 	IOV_CTO_PREVENTION,
317 	IOV_PCIE_WD_RESET,
318 	IOV_DUMP_DONGLE,
319 	IOV_HWA_ENAB_BMAP,
320 	IOV_IDMA_ENABLE,
321 	IOV_IFRM_ENABLE,
322 	IOV_CLEAR_RING,
323 	IOV_DAR_ENABLE,
324 	IOV_DNGL_CAPS,   /**< returns string with dongle capabilities */
325 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
326 	IOV_GDB_SERVER,  /**< starts gdb server on given interface */
327 #endif /* DEBUGGER || DHD_DSCOPE */
328 	IOV_INB_DW_ENABLE,
329 	IOV_CTO_THRESHOLD,
330 	IOV_HSCBSIZE, /* get HSCB buffer size */
331 #ifdef DHD_BUS_MEM_ACCESS
332 	IOV_HSCBBYTES, /* copy HSCB buffer */
333 #endif // endif
334 	IOV_HP2P_ENABLE,
335 	IOV_HP2P_PKT_THRESHOLD,
336 	IOV_HP2P_TIME_THRESHOLD,
337 	IOV_HP2P_PKT_EXPIRY,
338 	IOV_HP2P_TXCPL_MAXITEMS,
339 	IOV_HP2P_RXCPL_MAXITEMS,
340 	IOV_EXTDTXS_IN_TXCPL,
341 	IOV_HOSTRDY_AFTER_INIT,
342 	IOV_PCIE_LAST /**< unused IOVAR */
343 };
344 
345 const bcm_iovar_t dhdpcie_iovars[] = {
346 	{"intr",	IOV_INTR,	0, 	0, IOVT_BOOL,	0 },
347 #ifdef DHD_BUS_MEM_ACCESS
348 	{"membytes",	IOV_MEMBYTES,	0, 	0, IOVT_BUFFER,	2 * sizeof(int) },
349 #endif /* DHD_BUS_MEM_ACCESS */
350 	{"memsize",	IOV_MEMSIZE,	0, 	0, IOVT_UINT32,	0 },
351 	{"dwnldstate",	IOV_SET_DOWNLOAD_STATE,	0, 	0, IOVT_BOOL,	0 },
352 	{"vars",	IOV_VARS,	0, 	0, IOVT_BUFFER,	0 },
353 	{"devreset",	IOV_DEVRESET,	0, 	0, IOVT_UINT8,	0 },
354 	{"pcie_device_trap", IOV_FORCE_FW_TRAP, 0, 	0, 0,	0 },
355 	{"pcie_lpbk",	IOV_PCIE_LPBK,	0,	0, IOVT_UINT32,	0 },
356 	{"cc_nvmshadow", IOV_CC_NVMSHADOW, 0,	0, IOVT_BUFFER, 0 },
357 	{"ramsize",	IOV_RAMSIZE,	0, 	0, IOVT_UINT32,	0 },
358 	{"ramstart",	IOV_RAMSTART,	0, 	0, IOVT_UINT32,	0 },
359 #ifdef DHD_PCIE_REG_ACCESS
360 	{"pciereg",	IOV_PCIEREG,	0, 	0, IOVT_BUFFER,	2 * sizeof(int32) },
361 	{"pciecfgreg",	IOV_PCIECFGREG,	0, 	0, IOVT_BUFFER,	2 * sizeof(int32) },
362 	{"pciecorereg",	IOV_PCIECOREREG,	0, 	0, IOVT_BUFFER,	2 * sizeof(int32) },
363 	{"pcieserdesreg",	IOV_PCIESERDESREG,	0, 	0, IOVT_BUFFER,	3 * sizeof(int32) },
364 	{"bar0secwinreg",	IOV_BAR0_SECWIN_REG,	0, 	0, IOVT_BUFFER,	sizeof(sdreg_t) },
365 	{"sbreg",	IOV_SBREG,	0,	0, IOVT_BUFFER,	sizeof(uint8) },
366 #endif /* DHD_PCIE_REG_ACCESS */
367 	{"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, 0, IOVT_BUFFER, sizeof(dma_xfer_info_t)},
368 	{"pcie_suspend", IOV_PCIE_SUSPEND,	DHD_IOVF_PWRREQ_BYPASS,	0, IOVT_UINT32,	0 },
369 	{"sleep_allowed",	IOV_SLEEP_ALLOWED,	0,	0, IOVT_BOOL,	0 },
370 	{"dngl_isolation", IOV_DONGLEISOLATION,	0, 	0, IOVT_UINT32,	0 },
371 	{"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD,	0,	0, IOVT_UINT32,	0 },
372 	{"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK,	0, 	0, IOVT_BUFFER,	0 },
373 	{"dma_ring_indices", IOV_DMA_RINGINDICES,	0, 	0, IOVT_UINT32,	0},
374 	{"metadata_dbg", IOV_METADATA_DBG,	0,	0, IOVT_BOOL,	0 },
375 	{"rx_metadata_len", IOV_RX_METADATALEN,	0, 	0, IOVT_UINT32,	0 },
376 	{"tx_metadata_len", IOV_TX_METADATALEN,	0, 	0, IOVT_UINT32,	0 },
377 	{"db1_for_mb", IOV_DB1_FOR_MB,	0, 	0, IOVT_UINT32,	0 },
378 	{"txp_thresh", IOV_TXP_THRESHOLD,	0,	0, IOVT_UINT32,	0 },
379 	{"buzzz_dump", IOV_BUZZZ_DUMP,		0, 	0, IOVT_UINT32,	0 },
380 	{"flow_prio_map", IOV_FLOW_PRIO_MAP,	0, 	0, IOVT_UINT32,	0 },
381 #ifdef DHD_PCIE_RUNTIMEPM
382 	{"idletime",    IOV_IDLETIME,   0,	0, IOVT_INT32,     0 },
383 #endif /* DHD_PCIE_RUNTIMEPM */
384 	{"rxbound",     IOV_RXBOUND,    0, 0,	IOVT_UINT32,    0 },
385 	{"txbound",     IOV_TXBOUND,    0, 0,	IOVT_UINT32,    0 },
386 #ifdef DHD_PCIE_REG_ACCESS
387 	{"aspm", IOV_PCIEASPM, 0, 0, IOVT_INT32, 0 },
388 #endif /* DHD_PCIE_REG_ACCESS */
389 	{"fw_hang_report", IOV_HANGREPORT,	0, 0,	IOVT_BOOL,	0 },
390 	{"h2d_mb_data",     IOV_H2D_MAILBOXDATA,    0, 0,      IOVT_UINT32,    0 },
391 	{"inforings",   IOV_INFORINGS,    0, 0,      IOVT_UINT32,    0 },
392 	{"h2d_phase",   IOV_H2D_PHASE,    0, 0,      IOVT_UINT32,    0 },
393 	{"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE,    0, 0,
394 	IOVT_UINT32,    0 },
395 	{"h2d_max_txpost",   IOV_H2D_TXPOST_MAX_ITEM,    0, 0,      IOVT_UINT32,    0 },
396 	{"trap_data",	IOV_TRAPDATA,	0, 0,	IOVT_BUFFER,	0 },
397 	{"trap_data_raw",	IOV_TRAPDATA_RAW,	0, 0,	IOVT_BUFFER,	0 },
398 	{"cto_prevention",	IOV_CTO_PREVENTION,	0, 0,	IOVT_UINT32,	0 },
399 	{"pcie_wd_reset",	IOV_PCIE_WD_RESET,	0,	0, IOVT_BOOL,	0 },
400 	{"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
401 	MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t))},
402 	{"clear_ring",   IOV_CLEAR_RING,    0, 0,  IOVT_UINT32,    0 },
403 	{"hwa_enab_bmap",   IOV_HWA_ENAB_BMAP,    0, 0,  IOVT_UINT32,    0 },
404 	{"idma_enable",   IOV_IDMA_ENABLE,    0, 0,  IOVT_UINT32,    0 },
405 	{"ifrm_enable",   IOV_IFRM_ENABLE,    0, 0,  IOVT_UINT32,    0 },
406 	{"dar_enable",   IOV_DAR_ENABLE,    0, 0,  IOVT_UINT32,    0 },
407 	{"cap", IOV_DNGL_CAPS,	0, 0, IOVT_BUFFER,	0},
408 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
409 	{"gdb_server", IOV_GDB_SERVER,    0, 0,      IOVT_UINT32,    0 },
410 #endif /* DEBUGGER || DHD_DSCOPE */
411 	{"inb_dw_enable",   IOV_INB_DW_ENABLE,    0, 0,  IOVT_UINT32,    0 },
412 	{"cto_threshold",	IOV_CTO_THRESHOLD,	0,	0, IOVT_UINT32,	0 },
413 	{"hscbsize",	IOV_HSCBSIZE,	0,	0,	IOVT_UINT32,	0 },
414 #ifdef DHD_BUS_MEM_ACCESS
415 	{"hscbbytes",	IOV_HSCBBYTES,	0,	0,	IOVT_BUFFER,	2 * sizeof(int32) },
416 #endif // endif
417 #ifdef DHD_HP2P
418 	{"hp2p_enable", IOV_HP2P_ENABLE,	0,	0, IOVT_UINT32,	0 },
419 	{"hp2p_pkt_thresh", IOV_HP2P_PKT_THRESHOLD,	0,	0, IOVT_UINT32,	0 },
420 	{"hp2p_time_thresh", IOV_HP2P_TIME_THRESHOLD,	0,	0, IOVT_UINT32,	0 },
421 	{"hp2p_pkt_expiry", IOV_HP2P_PKT_EXPIRY,	0,	0, IOVT_UINT32,	0 },
422 	{"hp2p_txcpl_maxitems", IOV_HP2P_TXCPL_MAXITEMS,	0,	0, IOVT_UINT32,	0 },
423 	{"hp2p_rxcpl_maxitems", IOV_HP2P_RXCPL_MAXITEMS,	0,	0, IOVT_UINT32,	0 },
424 #endif // endif
425 	{"extdtxs_in_txcpl", IOV_EXTDTXS_IN_TXCPL,	0,	0, IOVT_UINT32,	0 },
426 	{"hostrdy_after_init", IOV_HOSTRDY_AFTER_INIT,	0,	0, IOVT_UINT32,	0 },
427 	{NULL, 0, 0, 0, 0, 0 }
428 };
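
/*
 * Editor's note: each row above binds an iovar name to its IOV_* id, flags
 * and value type. dhd_bus_dmaxfer_lpbk() later in this file shows the
 * matching caller side, issuing "pcie_dmaxfer" through dhd_bus_iovar_op()
 * with a dma_xfer_info_t payload that satisfies the IOVT_BUFFER length in
 * the table.
 */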
429 
430 #ifdef BCMQT
431 #define MAX_READ_TIMEOUT	(200 * 1000 * 1000)
432 #else
433 #define MAX_READ_TIMEOUT	(5 * 1000 * 1000)
434 #endif // endif
435 
436 #ifndef DHD_RXBOUND
437 #define DHD_RXBOUND		64
438 #endif // endif
439 #ifndef DHD_TXBOUND
440 #define DHD_TXBOUND		64
441 #endif // endif
442 
443 #define DHD_INFORING_BOUND	32
444 #define DHD_BTLOGRING_BOUND	32
445 
446 uint dhd_rxbound = DHD_RXBOUND;
447 uint dhd_txbound = DHD_TXBOUND;
448 
449 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
450 /** the GDB debugger layer will call back into this (bus) layer to read/write dongle memory */
451 static struct dhd_gdb_bus_ops_s  bus_ops = {
452 	.read_u16 = dhdpcie_bus_rtcm16,
453 	.read_u32 = dhdpcie_bus_rtcm32,
454 	.write_u32 = dhdpcie_bus_wtcm32,
455 };
456 #endif /* DEBUGGER || DHD_DSCOPE */
457 
458 bool
459 dhd_bus_get_flr_force_fail(struct dhd_bus *bus)
460 {
461 	return bus->flr_force_fail;
462 }
463 
464 /**
465  * Register/Unregister functions are called by the main DHD entry point (eg module insertion) to
466  * link with the bus driver, in order to look for or await the device.
467  */
468 int
469 dhd_bus_register(void)
470 {
471 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
472 
473 	return dhdpcie_bus_register();
474 }
475 
476 void
477 dhd_bus_unregister(void)
478 {
479 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
480 
481 	dhdpcie_bus_unregister();
482 	return;
483 }
484 
485 /** returns a host virtual address */
486 uint32 *
487 dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
488 {
489 	return (uint32 *)REG_MAP(addr, size);
490 }
491 
492 void
493 dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size)
494 {
495 	REG_UNMAP(addr);
496 	return;
497 }
498 
499 /**
500  * return H2D Doorbell register address
501  * use DAR registers instead of the enum space register for corerev >= 23 (4347B0)
502  */
503 static INLINE uint
504 dhd_bus_db0_addr_get(struct dhd_bus *bus)
505 {
506 	uint addr = PCIH2D_MailBox;
507 	uint dar_addr = DAR_PCIH2D_DB0_0(bus->sih->buscorerev);
508 
509 	return ((DAR_ACTIVE(bus->dhd)) ? dar_addr : addr);
510 }
511 
512 static INLINE uint
513 dhd_bus_db0_addr_2_get(struct dhd_bus *bus)
514 {
515 	return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB2_0(bus->sih->buscorerev) : PCIH2D_MailBox_2);
516 }
517 
518 static INLINE uint
519 dhd_bus_db1_addr_get(struct dhd_bus *bus)
520 {
521 	return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB0_1(bus->sih->buscorerev) : PCIH2D_DB1);
522 }
523 
524 static INLINE uint
525 dhd_bus_db1_addr_1_get(struct dhd_bus *bus)
526 {
527 	return ((DAR_ACTIVE(bus->dhd)) ? DAR_PCIH2D_DB1_1(bus->sih->buscorerev) : PCIH2D_DB1_1);
528 }
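
/*
 * Editor's sketch (hypothetical doorbell value): ringing H2D doorbell 0
 * combines the selector above with a masked core-register write, roughly:
 *
 *	si_corereg(bus->sih, bus->sih->buscoreidx,
 *		dhd_bus_db0_addr_get(bus), ~0, 0x12345678);
 *
 * DAR_ACTIVE() selects the DAR copy of the doorbell register on newer
 * cores (corerev >= 23), per the comment above these getters.
 */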
529 
530 /*
531  * WAR for SWWLAN-215055 - [4378B0] ARM fails to boot without DAR WL domain request
532  */
533 static INLINE void
534 dhd_bus_pcie_pwr_req_wl_domain(struct dhd_bus *bus, bool enable)
535 {
536 	if (enable) {
537 		si_corereg(bus->sih, bus->sih->buscoreidx,
538 			DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev),
539 			SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT,
540 			SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT);
541 	} else {
542 		si_corereg(bus->sih, bus->sih->buscoreidx,
543 			DAR_PCIE_PWR_CTRL((bus->sih)->buscorerev),
544 			SRPWR_DMN1_ARMBPSD_MASK << SRPWR_REQON_SHIFT, 0);
545 	}
546 }
547 
548 static INLINE void
549 _dhd_bus_pcie_pwr_req_clear_cmn(struct dhd_bus *bus)
550 {
551 	uint mask;
552 
553 	/*
554 	 * If there are multiple de-asserts, decrement the ref count and return.
555 	 * Clear the power request only when a single request is pending,
556 	 * so the initial request is not removed unexpectedly.
557 	 */
558 	if (bus->pwr_req_ref > 1) {
559 		bus->pwr_req_ref--;
560 		return;
561 	}
562 
563 	ASSERT(bus->pwr_req_ref == 1);
564 
565 	if (MULTIBP_ENAB(bus->sih)) {
566 		/* Common BP controlled by HW so only need to toggle WL/ARM backplane */
567 		mask = SRPWR_DMN1_ARMBPSD_MASK;
568 	} else {
569 		mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
570 	}
571 
572 	si_srpwr_request(bus->sih, mask, 0);
573 	bus->pwr_req_ref = 0;
574 }
575 
576 static INLINE void
577 dhd_bus_pcie_pwr_req_clear(struct dhd_bus *bus)
578 {
579 	unsigned long flags = 0;
580 
581 	DHD_GENERAL_LOCK(bus->dhd, flags);
582 	_dhd_bus_pcie_pwr_req_clear_cmn(bus);
583 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
584 }
585 
586 static INLINE void
587 dhd_bus_pcie_pwr_req_clear_nolock(struct dhd_bus *bus)
588 {
589 	_dhd_bus_pcie_pwr_req_clear_cmn(bus);
590 }
591 
592 static INLINE void
593 _dhd_bus_pcie_pwr_req_cmn(struct dhd_bus *bus)
594 {
595 	uint mask, val;
596 
597 	/* If multiple request entries, increment reference and return */
598 	if (bus->pwr_req_ref > 0) {
599 		bus->pwr_req_ref++;
600 		return;
601 	}
602 
603 	ASSERT(bus->pwr_req_ref == 0);
604 
605 	if (MULTIBP_ENAB(bus->sih)) {
606 		/* Common BP controlled by HW so only need to toggle WL/ARM backplane */
607 		mask = SRPWR_DMN1_ARMBPSD_MASK;
608 		val = SRPWR_DMN1_ARMBPSD_MASK;
609 	} else {
610 		mask = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
611 		val = SRPWR_DMN0_PCIE_MASK | SRPWR_DMN1_ARMBPSD_MASK;
612 	}
613 
614 	si_srpwr_request(bus->sih, mask, val);
615 
616 	bus->pwr_req_ref = 1;
617 }
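
/*
 * Editor's usage sketch (mirrors the MULTIBP_ENAB() pattern used elsewhere
 * in this file): power requests are reference counted, so every request
 * must be paired with a clear:
 *
 *	if (MULTIBP_ENAB(bus->sih))
 *		dhd_bus_pcie_pwr_req(bus);
 *	... access ARM/WL backplane registers ...
 *	if (MULTIBP_ENAB(bus->sih))
 *		dhd_bus_pcie_pwr_req_clear(bus);
 */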
618 
619 static INLINE void
620 dhd_bus_pcie_pwr_req(struct dhd_bus *bus)
621 {
622 	unsigned long flags = 0;
623 
624 	DHD_GENERAL_LOCK(bus->dhd, flags);
625 	_dhd_bus_pcie_pwr_req_cmn(bus);
626 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
627 }
628 
629 static INLINE void
630 _dhd_bus_pcie_pwr_req_pd0123_cmn(struct dhd_bus *bus)
631 {
632 	uint mask, val;
633 
634 	mask = SRPWR_DMN_ALL_MASK(bus->sih);
635 	val = SRPWR_DMN_ALL_MASK(bus->sih);
636 
637 	si_srpwr_request(bus->sih, mask, val);
638 }
639 
640 static INLINE void
641 dhd_bus_pcie_pwr_req_reload_war(struct dhd_bus *bus)
642 {
643 	unsigned long flags = 0;
644 
645 	DHD_GENERAL_LOCK(bus->dhd, flags);
646 	_dhd_bus_pcie_pwr_req_pd0123_cmn(bus);
647 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
648 }
649 
650 static INLINE void
651 _dhd_bus_pcie_pwr_req_clear_pd0123_cmn(struct dhd_bus *bus)
652 {
653 	uint mask;
654 
655 	mask = SRPWR_DMN_ALL_MASK(bus->sih);
656 
657 	si_srpwr_request(bus->sih, mask, 0);
658 }
659 
660 static INLINE void
661 dhd_bus_pcie_pwr_req_clear_reload_war(struct dhd_bus *bus)
662 {
663 	unsigned long flags = 0;
664 
665 	DHD_GENERAL_LOCK(bus->dhd, flags);
666 	_dhd_bus_pcie_pwr_req_clear_pd0123_cmn(bus);
667 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
668 }
669 
670 static INLINE void
671 dhd_bus_pcie_pwr_req_nolock(struct dhd_bus *bus)
672 {
673 	_dhd_bus_pcie_pwr_req_cmn(bus);
674 }
675 
676 bool
677 dhdpcie_chip_support_msi(dhd_bus_t *bus)
678 {
679 	DHD_ERROR(("%s: buscorerev=%d chipid=0x%x\n",
680 		__FUNCTION__, bus->sih->buscorerev, si_chipid(bus->sih)));
681 	if (bus->sih->buscorerev <= 14 ||
682 		si_chipid(bus->sih) == BCM4375_CHIP_ID ||
683 		si_chipid(bus->sih) == BCM4362_CHIP_ID ||
684 		si_chipid(bus->sih) == BCM43751_CHIP_ID ||
685 		si_chipid(bus->sih) == BCM4361_CHIP_ID ||
686 		si_chipid(bus->sih) == CYW55560_CHIP_ID) {
687 		return FALSE;
688 	} else {
689 		return TRUE;
690 	}
691 }
692 
693 /**
694  * Called once for each hardware (dongle) instance that this DHD manages.
695  *
696  * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
697  * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
698  * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
699  *
700  * 'tcm' is the *host* virtual address at which tcm is mapped.
701  */
702 int dhdpcie_bus_attach(osl_t *osh, dhd_bus_t **bus_ptr,
703 	volatile char *regs, volatile char *tcm, void *pci_dev)
704 {
705 	dhd_bus_t *bus = NULL;
706 	int ret = BCME_OK;
707 	/* customvar1 and customvar2 are customer configurable CIS tuples in OTP.
708 	* In dual chip (PCIE) scenario, customvar2 is used as a hint to detect
709 	* the chip variants and load the right firmware and NVRAM
710 	*/
711 	/* Below vars are set to 0x0 as an OTPed value cannot be 0x0 */
712 	uint32 customvar1 = 0x0;
713 	uint32 customvar2 = 0x0;
714 	uint32 otp_hw_module_variant = 0x0;
715 	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
716 
717 	do {
718 		if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) {
719 			DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
720 			ret = BCME_NORESOURCE;
721 			break;
722 		}
723 
724 		bus->regs = regs;
725 		bus->tcm = tcm;
726 		bus->osh = osh;
727 		/* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
728 		bus->dev = (struct pci_dev *)pci_dev;
729 
730 		dll_init(&bus->flowring_active_list);
731 #ifdef IDLE_TX_FLOW_MGMT
732 		bus->active_list_last_process_ts = OSL_SYSUPTIME();
733 #endif /* IDLE_TX_FLOW_MGMT */
734 
735 		/* Attach pcie shared structure */
736 		if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) {
737 			DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
738 			ret = BCME_NORESOURCE;
739 			break;
740 		}
741 
742 		/* dhd_common_init(osh); */
743 
744 		if (dhdpcie_dongle_attach(bus)) {
745 			DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
746 			ret = BCME_NOTREADY;
747 			break;
748 		}
749 
750 		if (!hw_module_variant) {
751 			/* For single wifi module */
752 			goto enumerate_module;
753 		}
754 
755 		/* read the OTP customvar tuples and store them in customvar1 and customvar2 */
756 		if (dhdpcie_sromotp_customvar(bus, &customvar1, &customvar2)) {
757 			DHD_ERROR(("%s: dhdpcie_sromotp_customvar failed\n", __FUNCTION__));
758 			break;
759 		}
760 		if (!customvar2) {
761 			DHD_ERROR(("%s:customvar2 is not OTPed"
762 				   "hw_module_variant=0x%x\n",
763 				   __FUNCTION__, hw_module_variant));
764 			goto enumerate_module;
765 		}
766 		/* customvar2=0xNNMMLLKK, LL is module variant */
767 		otp_hw_module_variant = (customvar2 >> 8) & 0xFF;
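		/* Editor's worked example: customvar2 = 0x11223344 gives
		 * (0x11223344 >> 8) & 0xFF == 0x33 as the module variant (the LL byte).
		 */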
768 		DHD_TRACE(("%s hw_module_variant=0x%x and"
769 			"OTPed-module_variant=0x%x\n", __func__,
770 			 hw_module_variant, otp_hw_module_variant));
771 		if (hw_module_variant != otp_hw_module_variant) {
772 			DHD_ERROR(("%s: Not going to enumerate this module as "
773 				"hw_module_variant=0x%x and "
774 				"OTPed-module_variant=0x%x didn't match\n",
775 				__FUNCTION__, hw_module_variant, otp_hw_module_variant));
776 			break;
777 		}
778 		DHD_TRACE(("%s: Going to enumerate this module as "
779 			"hw_module_variant=0x%x and "
780 			"OTPed-module_variant=0x%x match\n",
781 			__FUNCTION__, hw_module_variant, otp_hw_module_variant));
782 enumerate_module:
783 		/* software resources */
784 		if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
785 			DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
786 			break;
787 		}
788 
789 		DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
790 		bus->dhd->busstate = DHD_BUS_DOWN;
791 		bus->dhd->hostrdy_after_init = TRUE;
792 		bus->db1_for_mb = TRUE;
793 		bus->dhd->hang_report = TRUE;
794 		bus->use_mailbox = FALSE;
795 		bus->use_d0_inform = FALSE;
796 		bus->intr_enabled = FALSE;
797 		bus->flr_force_fail = FALSE;
798 		/* By default disable HWA and enable it via iovar */
799 		bus->hwa_enab_bmap = 0;
800 		/* update the dma indices if set through module parameter. */
801 		if (dma_ring_indices != 0) {
802 			dhdpcie_set_dma_ring_indices(bus->dhd, dma_ring_indices);
803 		}
804 		/* update h2d phase support if set through module parameter */
805 		bus->dhd->h2d_phase_supported = h2d_phase ? TRUE : FALSE;
806 		/* update force trap on bad phase if set through module parameter */
807 		bus->dhd->force_dongletrap_on_bad_h2d_phase =
808 			force_trap_bad_h2d_phase ? TRUE : FALSE;
809 #ifdef IDLE_TX_FLOW_MGMT
810 		bus->enable_idle_flowring_mgmt = FALSE;
811 #endif /* IDLE_TX_FLOW_MGMT */
812 		bus->irq_registered = FALSE;
813 
814 #ifdef DHD_MSI_SUPPORT
815 #ifdef DHD_FORCE_MSI
816 		bus->d2h_intr_method = PCIE_MSI;
817 #else
818 		bus->d2h_intr_method = enable_msi && dhdpcie_chip_support_msi(bus) ?
819 			PCIE_MSI : PCIE_INTX;
820 #endif /* DHD_FORCE_MSI */
821 #else
822 		bus->d2h_intr_method = PCIE_INTX;
823 #endif /* DHD_MSI_SUPPORT */
824 
825 #ifdef DHD_HP2P
826 		bus->hp2p_txcpl_max_items = DHD_MAX_ITEMS_HPP_TXCPL_RING;
827 		bus->hp2p_rxcpl_max_items = DHD_MAX_ITEMS_HPP_RXCPL_RING;
828 #endif /* DHD_HP2P */
829 
830 		DHD_TRACE(("%s: EXIT SUCCESS\n",
831 			__FUNCTION__));
832 		g_dhd_bus = bus;
833 		*bus_ptr = bus;
834 		return ret;
835 	} while (0);
836 
837 	DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
838 
839 	if (bus && bus->pcie_sh) {
840 		MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
841 	}
842 
843 	if (bus) {
844 		MFREE(osh, bus, sizeof(dhd_bus_t));
845 	}
846 
847 	return ret;
848 }
849 
850 bool
851 dhd_bus_skip_clm(dhd_pub_t *dhdp)
852 {
853 	switch (dhd_bus_chip_id(dhdp)) {
854 		case BCM4369_CHIP_ID:
855 			return TRUE;
856 		default:
857 			return FALSE;
858 	}
859 }
860 
861 uint
862 dhd_bus_chip(struct dhd_bus *bus)
863 {
864 	ASSERT(bus->sih != NULL);
865 	return bus->sih->chip;
866 }
867 
868 uint
869 dhd_bus_chiprev(struct dhd_bus *bus)
870 {
871 	ASSERT(bus);
872 	ASSERT(bus->sih != NULL);
873 	return bus->sih->chiprev;
874 }
875 
876 void *
877 dhd_bus_pub(struct dhd_bus *bus)
878 {
879 	return bus->dhd;
880 }
881 
882 void *
883 dhd_bus_sih(struct dhd_bus *bus)
884 {
885 	return (void *)bus->sih;
886 }
887 
888 void *
889 dhd_bus_txq(struct dhd_bus *bus)
890 {
891 	return &bus->txq;
892 }
893 
894 /** Get Chip ID version */
895 uint dhd_bus_chip_id(dhd_pub_t *dhdp)
896 {
897 	dhd_bus_t *bus = dhdp->bus;
898 	return  bus->sih->chip;
899 }
900 
901 /** Get Chip Rev ID version */
902 uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
903 {
904 	dhd_bus_t *bus = dhdp->bus;
905 	return bus->sih->chiprev;
906 }
907 
908 #ifdef DHD_MAP_CHIP_FIRMWARE_PATH
909 /* Get Chip Module ID */
910 uint dhd_bus_chipmodule_id(dhd_pub_t *dhdp)
911 {
912 	return 0;
913 }
914 #endif /* DHD_MAP_CHIP_FIRMWARE_PATH */
915 
916 /** Get Chip Pkg ID version */
917 uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
918 {
919 	dhd_bus_t *bus = dhdp->bus;
920 	return bus->sih->chippkg;
921 }
922 
923 /** Conduct Loopback test */
924 int
925 dhd_bus_dmaxfer_lpbk(dhd_pub_t *dhdp, uint32 type)
926 {
927 	dma_xfer_info_t dmaxfer_lpbk;
928 	int ret = BCME_OK;
929 
930 #define PCIE_DMAXFER_LPBK_LENGTH	4096
931 	memset(&dmaxfer_lpbk, 0, sizeof(dma_xfer_info_t));
932 	dmaxfer_lpbk.version = DHD_DMAXFER_VERSION;
933 	dmaxfer_lpbk.length = (uint16)sizeof(dma_xfer_info_t);
934 	dmaxfer_lpbk.num_bytes = PCIE_DMAXFER_LPBK_LENGTH;
935 	dmaxfer_lpbk.type = type;
936 	dmaxfer_lpbk.should_wait = TRUE;
937 
938 	ret = dhd_bus_iovar_op(dhdp, "pcie_dmaxfer", NULL, 0,
939 		(char *)&dmaxfer_lpbk, sizeof(dma_xfer_info_t), IOV_SET);
940 	if (ret < 0) {
941 		DHD_ERROR(("failed to start PCIe Loopback Test!!! "
942 			"Type:%d Reason:%d\n", type, ret));
943 		return ret;
944 	}
945 
946 	if (dmaxfer_lpbk.status != DMA_XFER_SUCCESS) {
947 		DHD_ERROR(("failed to check PCIe Loopback Test!!! "
948 			"Type:%d Status:%d Error code:%d\n", type,
949 			dmaxfer_lpbk.status, dmaxfer_lpbk.error_code));
950 		ret = BCME_ERROR;
951 	} else {
952 		DHD_ERROR(("successful to check PCIe Loopback Test"
953 			" Type:%d\n", type));
954 	}
955 #undef PCIE_DMAXFER_LPBK_LENGTH
956 
957 	return ret;
958 }
959 
960 /* Log the latest DPC schedule time */
961 void
962 dhd_bus_set_dpc_sched_time(dhd_pub_t *dhdp)
963 {
964 	dhdp->bus->dpc_sched_time = OSL_LOCALTIME_NS();
965 }
966 
967 /* Check if there are DPC scheduling errors */
968 bool
969 dhd_bus_query_dpc_sched_errors(dhd_pub_t *dhdp)
970 {
971 	dhd_bus_t *bus = dhdp->bus;
972 	bool sched_err;
973 
974 	if (bus->dpc_entry_time < bus->isr_exit_time) {
975 		/* Kernel doesn't schedule the DPC after processing PCIe IRQ */
976 		sched_err = TRUE;
977 	} else if (bus->dpc_entry_time < bus->resched_dpc_time) {
978 		/* Kernel doesn't schedule the DPC after DHD tries to reschedule
979 		 * the DPC due to pending work items to be processed.
980 		 */
981 		sched_err = TRUE;
982 	} else {
983 		sched_err = FALSE;
984 	}
985 
986 	if (sched_err) {
987 		/* print out minimum timestamp info */
988 		DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
989 			" isr_exit_time="SEC_USEC_FMT
990 			" dpc_entry_time="SEC_USEC_FMT
991 			"\ndpc_exit_time="SEC_USEC_FMT
992 			" dpc_sched_time="SEC_USEC_FMT
993 			" resched_dpc_time="SEC_USEC_FMT"\n",
994 			GET_SEC_USEC(bus->isr_entry_time),
995 			GET_SEC_USEC(bus->isr_exit_time),
996 			GET_SEC_USEC(bus->dpc_entry_time),
997 			GET_SEC_USEC(bus->dpc_exit_time),
998 			GET_SEC_USEC(bus->dpc_sched_time),
999 			GET_SEC_USEC(bus->resched_dpc_time)));
1000 	}
1001 
1002 	return sched_err;
1003 }
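
/*
 * Editor's note: the check above encodes the healthy IRQ-to-DPC ordering,
 * i.e. no scheduling error means
 *
 *	dpc_entry_time >= isr_exit_time && dpc_entry_time >= resched_dpc_time
 *
 * so every DPC scheduled from the ISR, or rescheduled for pending work,
 * actually entered.
 */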
1004 
1005 /** Read and clear intstatus. This should be called with interrupts disabled or inside isr */
1006 uint32
1007 dhdpcie_bus_intstatus(dhd_bus_t *bus)
1008 {
1009 	uint32 intstatus = 0;
1010 	uint32 intmask = 0;
1011 
1012 	if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
1013 		DHD_ERROR(("%s: trying to clear intstatus after D3 Ack\n", __FUNCTION__));
1014 		return intstatus;
1015 	}
1016 	if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
1017 		(bus->sih->buscorerev == 2)) {
1018 		intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
1019 		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
1020 		intstatus &= I_MB;
1021 	} else {
1022 		/* this is a PCIE core register..not a config register... */
1023 		intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
1024 
1025 		/* this is a PCIE core register..not a config register... */
1026 		intmask = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask, 0, 0);
1027 		/* If the device was removed, intstatus and intmask read back as 0xffffffff */
1028 		if (intstatus == (uint32)-1 || intmask == (uint32)-1) {
1029 			DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__));
1030 			DHD_ERROR(("%s: INTSTAT : 0x%x INTMASK : 0x%x.\n",
1031 			    __FUNCTION__, intstatus, intmask));
1032 			bus->is_linkdown = TRUE;
1033 			dhd_pcie_debug_info_dump(bus->dhd);
1034 #ifdef CUSTOMER_HW4_DEBUG
1035 #if defined(OEM_ANDROID)
1036 #ifdef SUPPORT_LINKDOWN_RECOVERY
1037 #ifdef CONFIG_ARCH_MSM
1038 			bus->no_cfg_restore = 1;
1039 #endif /* CONFIG_ARCH_MSM */
1040 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1041 			bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN_EP_DETECT;
1042 			dhd_os_send_hang_message(bus->dhd);
1043 #endif /* OEM_ANDROID */
1044 #endif /* CUSTOMER_HW4_DEBUG */
1045 			return intstatus;
1046 		}
1047 
1048 		intstatus &= intmask;
1049 
1050 		/*
1051 		 * The fourth argument to si_corereg is the "mask" of register fields to update
1052 		 * and the fifth argument is the "value" to write. If we are interested in only
1053 		 * a few fields of the "mask" bit map, we should not write back everything we read;
1054 		 * by doing so, we might clear/ack interrupts that are not handled yet.
1055 		 */
1056 		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
1057 			intstatus);
1058 
1059 		intstatus &= bus->def_intmask;
1060 	}
1061 
1062 	return intstatus;
1063 }
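
/*
 * Editor's note (sketch, not in the original source): per the in-line
 * comment above, si_corereg()'s fourth argument is a bit mask selecting
 * which register bits are written, and a zero mask makes the call a pure
 * read:
 *
 *	intstatus = si_corereg(sih, idx, offset, 0, 0);       pure read
 *	si_corereg(sih, idx, offset, bus->def_intmask,
 *		intstatus);                                   ack handled bits only
 */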
1064 
1065 void
1066 dhdpcie_cto_recovery_handler(dhd_pub_t *dhd)
1067 {
1068 	dhd_bus_t *bus = dhd->bus;
1069 	int ret;
1070 
1071 	/* Disable PCIe Runtime PM to avoid D3_ACK timeout.
1072 	 */
1073 	DHD_DISABLE_RUNTIME_PM(dhd);
1074 
1075 	/* Sleep for 1 second so that any AXI timeout,
1076 	 * even if running on the ALP clock, is also captured
1077 	 */
1078 	OSL_SLEEP(1000);
1079 
1080 	/* reset backplane and cto,
1081 	 * then access through pcie is recovered.
1082 	 */
1083 	ret = dhdpcie_cto_error_recovery(bus);
1084 	if (!ret) {
1085 		/* Waiting for backplane reset */
1086 		OSL_SLEEP(10);
1087 		/* Dump debug Info */
1088 		dhd_prot_debug_info_print(bus->dhd);
1089 		/* Dump console buffer */
1090 		dhd_bus_dump_console_buffer(bus);
1091 #if defined(DHD_FW_COREDUMP)
1092 		/* save core dump or write to a file */
1093 		if (!bus->is_linkdown && bus->dhd->memdump_enabled) {
1094 #ifdef DHD_SSSR_DUMP
1095 			bus->dhd->collect_sssr = TRUE;
1096 #endif /* DHD_SSSR_DUMP */
1097 			bus->dhd->memdump_type = DUMP_TYPE_CTO_RECOVERY;
1098 			dhdpcie_mem_dump(bus);
1099 		}
1100 #endif /* DHD_FW_COREDUMP */
1101 	}
1102 #ifdef OEM_ANDROID
1103 #ifdef SUPPORT_LINKDOWN_RECOVERY
1104 #ifdef CONFIG_ARCH_MSM
1105 	bus->no_cfg_restore = 1;
1106 #endif /* CONFIG_ARCH_MSM */
1107 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1108 	bus->is_linkdown = TRUE;
1109 	bus->dhd->hang_reason = HANG_REASON_PCIE_CTO_DETECT;
1110 	/* Send HANG event */
1111 	dhd_os_send_hang_message(bus->dhd);
1112 #endif /* OEM_ANDROID */
1113 }
1114 
1115 /**
1116  * Name:  dhdpcie_bus_isr
1117  * Parameters:
1118  * 1: IN int irq   -- interrupt vector
1119  * 2: IN void *arg      -- handle to private data structure
1120  * Return value:
1121  * Status (TRUE or FALSE)
1122  *
1123  * Description:
1124  * The interrupt service routine checks the status register, disables the
1125  * interrupt and queues the DPC if mailbox interrupts are raised.
1126  */
1127 int32
1128 dhdpcie_bus_isr(dhd_bus_t *bus)
1129 {
1130 	uint32 intstatus = 0;
1131 
1132 	do {
1133 		DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1134 		/* verify argument */
1135 		if (!bus) {
1136 			DHD_LOG_MEM(("%s : bus is null pointer, exit \n", __FUNCTION__));
1137 			break;
1138 		}
1139 
1140 		if (bus->dhd->dongle_reset) {
1141 			DHD_LOG_MEM(("%s : dongle is reset\n", __FUNCTION__));
1142 			break;
1143 		}
1144 
1145 		if (bus->dhd->busstate == DHD_BUS_DOWN) {
1146 			DHD_LOG_MEM(("%s : bus is down \n", __FUNCTION__));
1147 			break;
1148 		}
1149 
1150 		/* avoid processing of interrupts until msgbuf prot is inited */
1151 		if (!bus->intr_enabled) {
1152 			DHD_INFO(("%s, not ready to receive interrupts\n", __FUNCTION__));
1153 			break;
1154 		}
1155 
1156 		if (PCIECTO_ENAB(bus)) {
1157 			/* read pci_intstatus */
1158 			intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4);
1159 
1160 			if (intstatus & PCI_CTO_INT_MASK) {
1161 				DHD_ERROR(("%s: ##### CTO RECOVERY REPORTED BY DONGLE "
1162 					"intstat=0x%x enab=%d\n", __FUNCTION__,
1163 					intstatus, bus->cto_enable));
1164 				bus->cto_triggered = 1;
1165 				/*
1166 				 * DAR still accessible
1167 				 */
1168 				dhd_bus_dump_dar_registers(bus);
1169 
1170 				/* Disable further PCIe interrupts */
1171 				dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
1172 				/* Stop Tx flow */
1173 				dhd_bus_stop_queue(bus);
1174 
1175 				/* Schedule CTO recovery */
1176 				dhd_schedule_cto_recovery(bus->dhd);
1177 
1178 				return TRUE;
1179 			}
1180 		}
1181 
1182 		if (bus->d2h_intr_method == PCIE_MSI) {
1183 			/* For MSI, as intstatus is cleared by firmware, no need to read */
1184 			goto skip_intstatus_read;
1185 		}
1186 
1187 		intstatus = dhdpcie_bus_intstatus(bus);
1188 
1189 		/* Check if the interrupt is ours or not */
1190 		if (intstatus == 0) {
1191 			/* In EFI, since we poll for interrupts, this message would flood
1192 			* the logs, so it is disabled for EFI
1193 			*/
1194 			DHD_LOG_MEM(("%s : this interrupt is not ours\n", __FUNCTION__));
1195 			bus->non_ours_irq_count++;
1196 			bus->last_non_ours_irq_time = OSL_LOCALTIME_NS();
1197 			break;
1198 		}
1199 
1200 		/* save the intstatus */
1201 		/* read interrupt status register!! Status bits will be cleared in DPC !! */
1202 		bus->intstatus = intstatus;
1203 
1204 		/* return error for 0xFFFFFFFF */
1205 		if (intstatus == (uint32)-1) {
1206 			DHD_LOG_MEM(("%s : wrong interrupt status val : 0x%x\n",
1207 				__FUNCTION__, intstatus));
1208 			dhdpcie_disable_irq_nosync(bus);
1209 			break;
1210 		}
1211 
1212 skip_intstatus_read:
1213 		/*  Overall operation:
1214 		 *    - Mask further interrupts
1215 		 *    - Read/ack intstatus
1216 		 *    - Take action based on bits and state
1217 		 *    - Reenable interrupts (as per state)
1218 		 */
1219 
1220 		/* Count the interrupt call */
1221 		bus->intrcount++;
1222 
1223 		bus->ipend = TRUE;
1224 
1225 		bus->isr_intr_disable_count++;
1226 
1227 		/* For Linux, macOS, etc. (other than NDIS), instead of disabling
1228 		* the dongle interrupt by clearing the IntMask, disable the
1229 		* interrupt directly on the host side, so that the host will not
1230 		* receive any interrupts at all, even though the dongle raises them
1231 		*/
1232 		dhdpcie_bus_intr_disable(bus);
1233 		dhdpcie_disable_irq_nosync(bus); /* Disable interrupt!! */
1234 
1235 		bus->intdis = TRUE;
1236 
1237 #if defined(PCIE_ISR_THREAD)
1238 
1239 		DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
1240 		DHD_OS_WAKE_LOCK(bus->dhd);
1241 		while (dhd_bus_dpc(bus));
1242 		DHD_OS_WAKE_UNLOCK(bus->dhd);
1243 #else
1244 		bus->dpc_sched = TRUE;
1245 		dhd_sched_dpc(bus->dhd);     /* queue DPC now!! */
1246 #endif /* defined(PCIE_ISR_THREAD) */
1247 
1248 		DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__));
1249 		return TRUE;
1250 
1251 	} while (0);
1252 
1253 	DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__));
1254 	return FALSE;
1255 }
1256 
1257 int
1258 dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state)
1259 {
1260 	uint32 cur_state = 0;
1261 	uint32 pm_csr = 0;
1262 	osl_t *osh = bus->osh;
1263 
1264 	pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1265 	cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
1266 
1267 	if (cur_state == state) {
1268 		DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__, cur_state));
1269 		return BCME_OK;
1270 	}
1271 
1272 	if (state > PCIECFGREG_PM_CSR_STATE_D3_HOT)
1273 		return BCME_ERROR;
1274 
1275 	/* Validate the state transition:
1276 	* if already in a lower power state, return an error
1277 	*/
1278 	if (state != PCIECFGREG_PM_CSR_STATE_D0 &&
1279 			cur_state <= PCIECFGREG_PM_CSR_STATE_D3_COLD &&
1280 			cur_state > state) {
1281 		DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__));
1282 		return BCME_ERROR;
1283 	}
1284 
1285 	pm_csr &= ~PCIECFGREG_PM_CSR_STATE_MASK;
1286 	pm_csr |= state;
1287 
1288 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32), pm_csr);
1289 
1290 	/* need to wait for the specified mandatory pcie power transition delay time */
1291 	if (state == PCIECFGREG_PM_CSR_STATE_D3_HOT ||
1292 			cur_state == PCIECFGREG_PM_CSR_STATE_D3_HOT)
1293 			OSL_DELAY(DHDPCIE_PM_D3_DELAY);
1294 	else if (state == PCIECFGREG_PM_CSR_STATE_D2 ||
1295 			cur_state == PCIECFGREG_PM_CSR_STATE_D2)
1296 			OSL_DELAY(DHDPCIE_PM_D2_DELAY);
1297 
1298 	/* read back the power state and verify */
1299 	pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1300 	cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
1301 	if (cur_state != state) {
1302 		DHD_ERROR(("%s: power transition failed ! Current state is %u \n",
1303 				__FUNCTION__, cur_state));
1304 		return BCME_ERROR;
1305 	} else {
1306 		DHD_ERROR(("%s: power transition to %u success \n",
1307 				__FUNCTION__, cur_state));
1308 	}
1309 
1310 	return BCME_OK;
1311 }
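
/*
 * Editor's sketch (illustrative): a D3hot suspend path would call
 *
 *	dhdpcie_set_pwr_state(bus, PCIECFGREG_PM_CSR_STATE_D3_HOT);
 *
 * and resume with PCIECFGREG_PM_CSR_STATE_D0. The function inserts the
 * mandatory PCI PM transition delays and verifies the new state by reading
 * back PM_CSR.
 */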
1312 
1313 int
1314 dhdpcie_config_check(dhd_bus_t *bus)
1315 {
1316 	uint32 i, val;
1317 	int ret = BCME_ERROR;
1318 
1319 	for (i = 0; i < DHDPCIE_CONFIG_CHECK_RETRY_COUNT; i++) {
1320 		val = OSL_PCI_READ_CONFIG(bus->osh, PCI_CFG_VID, sizeof(uint32));
1321 		if ((val & 0xFFFF) == VENDOR_BROADCOM || (val & 0xFFFF) == VENDOR_CYPRESS) {
1322 			ret = BCME_OK;
1323 			break;
1324 		}
1325 		OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS * 1000);
1326 	}
1327 
1328 	return ret;
1329 }
1330 
1331 int
1332 dhdpcie_config_restore(dhd_bus_t *bus, bool restore_pmcsr)
1333 {
1334 	uint32 i;
1335 	osl_t *osh = bus->osh;
1336 
1337 	if (BCME_OK != dhdpcie_config_check(bus)) {
1338 		return BCME_ERROR;
1339 	}
1340 
1341 	for (i = PCI_CFG_REV >> 2; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
1342 		OSL_PCI_WRITE_CONFIG(osh, i << 2, sizeof(uint32), bus->saved_config.header[i]);
1343 	}
1344 	OSL_PCI_WRITE_CONFIG(osh, PCI_CFG_CMD, sizeof(uint32), bus->saved_config.header[1]);
1345 
1346 	if (restore_pmcsr)
1347 		OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR,
1348 			sizeof(uint32), bus->saved_config.pmcsr);
1349 
1350 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_CAP, sizeof(uint32), bus->saved_config.msi_cap);
1351 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, sizeof(uint32),
1352 			bus->saved_config.msi_addr0);
1353 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
1354 			sizeof(uint32), bus->saved_config.msi_addr1);
1355 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_DATA,
1356 			sizeof(uint32), bus->saved_config.msi_data);
1357 
1358 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_DEV_STATUS_CTRL,
1359 			sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat);
1360 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGGEN_DEV_STATUS_CTRL2,
1361 			sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat2);
1362 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL,
1363 			sizeof(uint32), bus->saved_config.exp_link_ctrl_stat);
1364 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL2,
1365 			sizeof(uint32), bus->saved_config.exp_link_ctrl_stat2);
1366 
1367 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
1368 			sizeof(uint32), bus->saved_config.l1pm0);
1369 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
1370 			sizeof(uint32), bus->saved_config.l1pm1);
1371 
1372 	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, sizeof(uint32),
1373 			bus->saved_config.bar0_win);
1374 	dhdpcie_setbar1win(bus, bus->saved_config.bar1_win);
1375 
1376 	return BCME_OK;
1377 }
1378 
1379 int
1380 dhdpcie_config_save(dhd_bus_t *bus)
1381 {
1382 	uint32 i;
1383 	osl_t *osh = bus->osh;
1384 
1385 	if (BCME_OK != dhdpcie_config_check(bus)) {
1386 		return BCME_ERROR;
1387 	}
1388 
1389 	for (i = 0; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
1390 		bus->saved_config.header[i] = OSL_PCI_READ_CONFIG(osh, i << 2, sizeof(uint32));
1391 	}
1392 
1393 	bus->saved_config.pmcsr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
1394 
1395 	bus->saved_config.msi_cap = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_CAP,
1396 			sizeof(uint32));
1397 	bus->saved_config.msi_addr0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_L,
1398 			sizeof(uint32));
1399 	bus->saved_config.msi_addr1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
1400 			sizeof(uint32));
1401 	bus->saved_config.msi_data = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_DATA,
1402 			sizeof(uint32));
1403 
1404 	bus->saved_config.exp_dev_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
1405 			PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32));
1406 	bus->saved_config.exp_dev_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
1407 			PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32));
1408 	bus->saved_config.exp_link_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
1409 			PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32));
1410 	bus->saved_config.exp_link_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
1411 			PCIECFGREG_LINK_STATUS_CTRL2, sizeof(uint32));
1412 
1413 	bus->saved_config.l1pm0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
1414 			sizeof(uint32));
1415 	bus->saved_config.l1pm1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
1416 			sizeof(uint32));
1417 
1418 	bus->saved_config.bar0_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN,
1419 			sizeof(uint32));
1420 	bus->saved_config.bar1_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR1_WIN,
1421 			sizeof(uint32));
1422 
1423 	return BCME_OK;
1424 }
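
/*
 * Editor's note: dhdpcie_config_save() and dhdpcie_config_restore() form a
 * pair. A typical (sketch) sequence around a link reset:
 *
 *	dhdpcie_config_save(bus);
 *	... link goes down / device is reset ...
 *	dhdpcie_config_restore(bus, TRUE);
 *
 * where restore_pmcsr == TRUE additionally rewrites the saved PM_CSR value.
 */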
1425 
1426 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1427 dhd_pub_t *link_recovery = NULL;
1428 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1429 
1430 static void
1431 dhdpcie_bus_intr_init(dhd_bus_t *bus)
1432 {
1433 	uint buscorerev = bus->sih->buscorerev;
1434 	bus->pcie_mailbox_int = PCIMailBoxInt(buscorerev);
1435 	bus->pcie_mailbox_mask = PCIMailBoxMask(buscorerev);
1436 	bus->d2h_mb_mask = PCIE_MB_D2H_MB_MASK(buscorerev);
1437 	bus->def_intmask = PCIE_MB_D2H_MB_MASK(buscorerev);
1438 	if (buscorerev < 64) {
1439 		bus->def_intmask |= PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
1440 	}
1441 }
1442 
1443 static void
1444 dhdpcie_cc_watchdog_reset(dhd_bus_t *bus)
1445 {
1446 	uint32 wd_en = (bus->sih->buscorerev >= 66) ? WD_SSRESET_PCIE_F0_EN :
1447 		(WD_SSRESET_PCIE_F0_EN | WD_SSRESET_PCIE_ALL_FN_EN);
1448 	pcie_watchdog_reset(bus->osh, bus->sih, WD_ENABLE_MASK, wd_en);
1449 }
1450 
1451 void
1452 dhdpcie_dongle_reset(dhd_bus_t *bus)
1453 {
1454 	/* if the pcie link is down, watchdog reset
1455 	 * should not be done, as it may hang
1456 	 */
1457 	if (bus->is_linkdown) {
1458 		return;
1459 	}
1460 
1461 	/* dhd_bus_perform_flr will return BCME_UNSUPPORTED if chip is not FLR capable */
1462 	if (dhd_bus_perform_flr(bus, FALSE) == BCME_UNSUPPORTED) {
1463 #ifdef DHD_USE_BP_RESET
1464 		/* Backplane reset using SPROM cfg register(0x88) for buscorerev <= 24 */
1465 		dhd_bus_perform_bp_reset(bus);
1466 #else
1467 		/* Legacy chipcommon watchdog reset */
1468 		dhdpcie_cc_watchdog_reset(bus);
1469 #endif /* DHD_USE_BP_RESET */
1470 	}
1471 }
1472 
1473 #ifdef CHIPS_CUSTOMER_HW6
1474 void
1475 dhdpcie_bus_mpu_disable(dhd_bus_t *bus)
1476 {
1477 	volatile uint32 *cr4_regs;
1478 	if (BCM4378_CHIP(bus->sih->chip)) {
1479 		cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
1480 		if (cr4_regs == NULL) {
1481 			DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
1482 			return;
1483 		}
1484 		if (R_REG(bus->osh, cr4_regs + ARMCR4REG_CORECAP) & ACC_MPU_MASK) {
1485 			/* bus mpu is supported */
1486 			W_REG(bus->osh, cr4_regs + ARMCR4REG_MPUCTRL, 0);
1487 		}
1488 	}
1489 }
1490 #endif /* CHIPS_CUSTOMER_HW6 */
1491 
1492 static bool
1493 dhdpcie_dongle_attach(dhd_bus_t *bus)
1494 {
1495 	osl_t *osh = bus->osh;
1496 	volatile void *regsva = (volatile void*)bus->regs;
1497 	uint16 devid;
1498 	uint32 val;
1499 	uint32 reg_val = 0;
1500 	bool is_pcie_reset = FALSE;
1501 	uint32 secureboot;
1502 	sbpcieregs_t *sbpcieregs;
1503 	bool dongle_isolation;
1504 	int32 bcmerror = BCME_ERROR;
1505 	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
1506 
1507 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1508 	link_recovery = bus->dhd;
1509 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1510 
1511 	bus->alp_only = TRUE;
1512 	bus->sih = NULL;
1513 
1514 	/* Checking PCIe bus status with reading configuration space */
1515 	val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
1516 	if ((val & 0xFFFF) != VENDOR_BROADCOM && (val & 0xFFFF) != VENDOR_CYPRESS) {
1517 		DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__));
1518 		goto fail;
1519 	}
1520 	devid = (val >> 16) & 0xFFFF;
1521 	bus->cl_devid = devid;
1522 
1523 	/* Set bar0 window to si_enum_base */
1524 	dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(devid));
1525 
1526 	/*
1527 	 * Check the PCI_SPROM_CONTROL register to prevent invalid address access
1528 	 * due to switching the address space from PCI_BUS to SI_BUS.
1529 	 */
1530 	val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32));
1531 	if (val == 0xffffffff) {
1532 		DHD_ERROR(("%s : failed to read SPROM control register\n", __FUNCTION__));
1533 		goto fail;
1534 	}
1535 
1536 	/* Get the Secureboot capability to make sure that these
1537 	 * functionalities are restricted to chips that have a bootloader
1538 	 */
1539 	secureboot = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_REVID, sizeof(uint32));
1540 
1541 	if (isset(&secureboot, PCIECFGREG_SECURE_MODE_SHIFT)) {
1542 
1543 		/* Set bar0 window to si_pcie_enum_base */
1544 		dhdpcie_bus_cfg_set_bar0_win(bus, si_pcie_enum_base(devid));
1545 		sbpcieregs = (sbpcieregs_t*)(bus->regs);
1546 		DHD_INFO(("%s: before read reg_val:%d\n", __FUNCTION__, reg_val));
1547 		reg_val = R_REG(osh, &sbpcieregs->u1.dar_64.d2h_msg_reg0);
1548 		DHD_INFO(("%s: after reg_val:%d\n", __FUNCTION__, reg_val));
1549 		if (reg_val != D2H_HS_START_STATE && reg_val != D2H_HS_READY_STATE) {
1550 			/* si_attach() will provide an SI handle and scan the backplane */
1551 			if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
1552 			     &bus->vars, &bus->varsz))) {
1553 				DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
1554 				goto fail;
1555 			}
1556 			dhdpcie_dongle_reset(bus);
1557 			is_pcie_reset = TRUE;
1558 		}
1559 
1560 		/* Pre ChipID access sequence, make sure that
1561 		 * bootloader is ready before ChipID access.
1562 		 */
1563 		bcmerror = dhdpcie_dongle_host_pre_chipid_access_sequence(osh, regsva);
1564 		if (bcmerror) {
1565 			DHD_ERROR(("%s: error - pre chipid access sequence error %d\n",
1566 				__FUNCTION__, bcmerror));
1567 			goto fail;
1568 		}
1569 
1570 		/* Set bar0 window to si_enum_base */
1571 		dhdpcie_bus_cfg_set_bar0_win(bus, si_enum_base(devid));
1572 	}
1573 
1574 	/* si_attach() will provide an SI handle and scan the backplane */
1575 	if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
1576 	                           &bus->vars, &bus->varsz))) {
1577 		DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
1578 		goto fail;
1579 	}
1580 
1581 	/* Configure CTO Prevention functionality */
1582 #if defined(BCMFPGA_HW)
1583 	DHD_ERROR(("Disable CTO\n"));
1584 	bus->cto_enable = FALSE;
1585 #else
1586 #if defined(BCMPCIE_CTO_PREVENTION)
1587 	if (bus->sih->buscorerev >= 24) {
1588 		DHD_ERROR(("Enable CTO\n"));
1589 		bus->cto_enable = TRUE;
1590 	} else
1591 #endif /* BCMPCIE_CTO_PREVENTION */
1592 	{
1593 		DHD_ERROR(("Disable CTO\n"));
1594 		bus->cto_enable = FALSE;
1595 	}
1596 #endif /* BCMFPGA_HW */
1597 
1598 	if (PCIECTO_ENAB(bus)) {
1599 		dhdpcie_cto_init(bus, TRUE);
1600 	}
1601 
1602 	/* Storing secureboot capability */
1603 	bus->sih->secureboot = isset(&secureboot, PCIECFGREG_SECURE_MODE_SHIFT);
1604 
1605 	if (MULTIBP_ENAB(bus->sih) && (bus->sih->buscorerev >= 66)) {
1606 		/*
1607 		 * HW JIRA - CRWLPCIEGEN2-672
1608 		 * The Producer Index feature used by F1 gets reset on F0 FLR;
1609 		 * this is fixed in REV68
1610 		 */
1611 		if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
1612 			dhdpcie_ssreset_dis_enum_rst(bus);
1613 		}
1614 
1615 		/* IOV_DEVRESET could exercise si_detach()/si_attach() again, so reset pwr_req_ref:
1616 		*   dhdpcie_bus_release_dongle() --> si_detach()
1617 		*   dhdpcie_dongle_attach() --> si_attach()
1618 		*/
1619 		bus->pwr_req_ref = 0;
1620 	}
1621 
1622 	if (MULTIBP_ENAB(bus->sih)) {
1623 		dhd_bus_pcie_pwr_req_nolock(bus);
1624 	}
1625 
1626 	/* Get info on the ARM and SOCRAM cores... */
1627 	/* Should really be qualified by device id */
1628 	if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
1629 	    (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
1630 	    (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
1631 	    (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
1632 		bus->armrev = si_corerev(bus->sih);
1633 		bus->coreid = si_coreid(bus->sih);
1634 	} else {
1635 		DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
1636 		goto fail;
1637 	}
1638 
1639 	/* CA7 requires coherent bits on */
1640 	if (bus->coreid == ARMCA7_CORE_ID) {
1641 		val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
1642 		dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
1643 			(val | PCIE_BARCOHERENTACCEN_MASK));
1644 	}
1645 
1646 	/* Olympic EFI requirement - stop the driver load if FW is already running.
1647 	*  This must be done here, before pcie_watchdog_reset, because
1648 	*  pcie_watchdog_reset will put the ARM back into the halt state
1649 	*/
1650 	if (!dhdpcie_is_arm_halted(bus)) {
1651 		DHD_ERROR(("%s: ARM is not halted, FW is already running! Abort.\n",
1652 				__FUNCTION__));
1653 		goto fail;
1654 	}
1655 
1656 	BCM_REFERENCE(dongle_isolation);
1657 
1658 	/* For inbuilt drivers pcie clk req will be done by RC,
1659 	 * so do not do clkreq from dhd
1660 	 */
1661 	if (dhd_download_fw_on_driverload)
1662 	{
1663 		/* Enable CLKREQ# */
1664 		dhdpcie_clkreq(bus->osh, 1, 1);
1665 	}
1666 
1667 	/*
1668 	 * bus->dhd will be NULL if this is called from dhd_bus_attach, so reset
1669 	 * without checking the dongle_isolation flag; but if it is called via some
1670 	 * other path, like a quiesce FLR, then watchdog_reset should be called
1671 	 * based on the dongle_isolation flag.
1672 	 */
1673 	if (bus->dhd == NULL) {
1674 		/* dhd_attach not yet happened, do watchdog reset */
1675 		dongle_isolation = FALSE;
1676 	} else {
1677 		dongle_isolation = bus->dhd->dongle_isolation;
1678 	}
1679 
1680 #ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH
1681 	/*
1682 	 * Issue CC watchdog to reset all the cores on the chip - similar to rmmod dhd
1683 	 * This is required to avoid spurious interrupts to the Host and bring back
1684 	 * dongle to a sane state (on host soft-reboot / watchdog-reboot).
1685 	 */
1686 	if (dongle_isolation == FALSE && is_pcie_reset == FALSE) {
1687 		dhdpcie_dongle_reset(bus);
1688 	}
1689 #endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */
1690 
1691 	/* need to set the force_bt_quiesce flag here
1692 	 * before calling dhdpcie_dongle_flr_or_pwr_toggle
1693 	 */
1694 	bus->force_bt_quiesce = TRUE;
1695 	/*
1696 	 * For buscorerev 66 and later, F0 FLR should be done independently of F1,
1697 	 * so BT quiesce is not needed.
1698 	 */
1699 	if (bus->sih->buscorerev >= 66) {
1700 		bus->force_bt_quiesce = FALSE;
1701 	}
1702 
1703 	dhdpcie_dongle_flr_or_pwr_toggle(bus);
1704 
1705 #ifdef CHIPS_CUSTOMER_HW6
1706 	dhdpcie_bus_mpu_disable(bus);
1707 #endif /* CHIPS_CUSTOMER_HW6 */
1708 
1709 	si_setcore(bus->sih, PCIE2_CORE_ID, 0);
1710 	sbpcieregs = (sbpcieregs_t*)(bus->regs);
1711 
1712 	/* WAR where the BAR1 window may not be sized properly */
1713 	W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
1714 	val = R_REG(osh, &sbpcieregs->configdata);
1715 	W_REG(osh, &sbpcieregs->configdata, val);
1716 
1717 	if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
1718 		/* Only set dongle RAMSIZE to default value when BMC vs ARM usage of SYSMEM is not
1719 		 * adjusted.
1720 		 */
1721 		if (!bus->ramsize_adjusted) {
1722 			if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) {
1723 				DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__));
1724 				goto fail;
1725 			}
1726 			switch ((uint16)bus->sih->chip) {
1727 #ifdef CHIPS_CUSTOMER_HW6
1728 				case BCM4368_CHIP_ID:
1729 					bus->dongle_ram_base = CA7_4368_RAM_BASE;
1730 					bus->orig_ramsize = 0x1c0000;
1731 					break;
1732 				CASE_BCM4367_CHIP:
1733 					bus->dongle_ram_base = CA7_4367_RAM_BASE;
1734 					bus->orig_ramsize = 0x1e0000;
1735 					break;
1736 #endif /* CHIPS_CUSTOMER_HW6 */
1737 				default:
1738 					/* also populate base address */
1739 					bus->dongle_ram_base = CA7_4365_RAM_BASE;
1740 					bus->orig_ramsize = 0x1c0000; /* Reserve 1.75MB for CA7 */
1741 					break;
1742 			}
1743 		}
1744 	} else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
1745 		if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
1746 			DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
1747 			goto fail;
1748 		}
1749 	} else {
1750 		/* cr4 has a different way to find the RAM size from TCM's */
1751 		if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
1752 			DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
1753 			goto fail;
1754 		}
1755 		/* also populate base address */
1756 		switch ((uint16)bus->sih->chip) {
1757 		case BCM4339_CHIP_ID:
1758 		case BCM4335_CHIP_ID:
1759 			bus->dongle_ram_base = CR4_4335_RAM_BASE;
1760 			break;
1761 		case BCM4358_CHIP_ID:
1762 		case BCM4354_CHIP_ID:
1763 		case BCM43567_CHIP_ID:
1764 		case BCM43569_CHIP_ID:
1765 		case BCM4350_CHIP_ID:
1766 		case BCM43570_CHIP_ID:
1767 			bus->dongle_ram_base = CR4_4350_RAM_BASE;
1768 			break;
1769 		case BCM4360_CHIP_ID:
1770 			bus->dongle_ram_base = CR4_4360_RAM_BASE;
1771 			break;
1772 
1773 		case BCM4364_CHIP_ID:
1774 			bus->dongle_ram_base = CR4_4364_RAM_BASE;
1775 			break;
1776 
1777 		CASE_BCM4345_CHIP:
1778 			bus->dongle_ram_base = (bus->sih->chiprev < 6)  /* changed at 4345C0 */
1779 				? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE;
1780 			break;
1781 		CASE_BCM43602_CHIP:
1782 			bus->dongle_ram_base = CR4_43602_RAM_BASE;
1783 			break;
1784 		case BCM4349_CHIP_GRPID:
1785 			/* RAM base changed from 4349c0 (revid=9) onwards */
1786 			bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
1787 				CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9);
1788 			break;
1789 		case BCM4347_CHIP_ID:
1790 		case BCM4357_CHIP_ID:
1791 		case BCM4361_CHIP_ID:
1792 			bus->dongle_ram_base = CR4_4347_RAM_BASE;
1793 			break;
1794 		case BCM4362_CHIP_ID:
1795 			bus->dongle_ram_base = CR4_4362_RAM_BASE;
1796 			break;
1797 		case BCM43751_CHIP_ID:
1798 			bus->dongle_ram_base = CR4_43751_RAM_BASE;
1799 			break;
1800 
1801 		case BCM4373_CHIP_ID:
1802 			bus->dongle_ram_base = CR4_4373_RAM_BASE;
1803 			break;
1804 #ifdef CHIPS_CUSTOMER_HW6
1805 		case BCM4378_CHIP_GRPID:
1806 			bus->dongle_ram_base = CR4_4378_RAM_BASE;
1807 			break;
1808 		case BCM4377_CHIP_ID:
1809 			bus->dongle_ram_base = CR4_4377_RAM_BASE;
1810 			break;
1811 #endif /* CHIPS_CUSTOMER_HW6 */
1812 		case BCM4375_CHIP_ID:
1813 		case BCM4369_CHIP_ID:
1814 			bus->dongle_ram_base = CR4_4369_RAM_BASE;
1815 			break;
1816 		case CYW55560_CHIP_ID:
1817 			bus->dongle_ram_base = CR4_55560_RAM_BASE;
1818 			break;
1819 		default:
1820 			bus->dongle_ram_base = 0;
1821 			DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
1822 			           __FUNCTION__, bus->dongle_ram_base));
1823 		}
1824 	}
1825 
1826 	/* 55560: dedicated space for TCAM patching and the TRX header at RAMBASE */
1827 	/* TCAM patching - 2048 bytes [2K], TRX header - 32 bytes */
1828 	if (bus->sih->chip == CYW55560_CHIP_ID) {
1829 		bus->orig_ramsize -= (CR4_55560_TCAM_SZ + CR4_55560_TRX_HDR_SZ);
1830 	}
1831 
1832 	bus->ramsize = bus->orig_ramsize;
1833 	if (dhd_dongle_memsize)
1834 		dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize);
1835 
1836 	if (bus->ramsize > DONGLE_TCM_MAP_SIZE) {
1837 		DHD_ERROR(("%s : invalid ramsize %d(0x%x) is returned from dongle\n",
1838 				__FUNCTION__, bus->ramsize, bus->ramsize));
1839 		goto fail;
1840 	}
1841 
1842 	DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
1843 	           bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
1844 
1845 	bus->srmemsize = si_socram_srmem_size(bus->sih);
1846 
1847 	dhdpcie_bus_intr_init(bus);
1848 
1849 	/* Set the poll and/or interrupt flags */
1850 	bus->intr = (bool)dhd_intr;
1851 
1852 	bus->idma_enabled = TRUE;
1853 	bus->ifrm_enabled = TRUE;
1854 	DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__));
1855 
1856 	if (MULTIBP_ENAB(bus->sih)) {
1857 		dhd_bus_pcie_pwr_req_clear_nolock(bus);
1858 
1859 		/*
1860 		 * One time clearing of Common Power Domain since HW default is set
1861 		 * Needs to be after FLR because FLR resets PCIe enum back to HW defaults
1862 		 * for 4378B0 (rev 68).
1863 		 * On 4378A0 (rev 66), PCIe enum reset is disabled due to CRWLPCIEGEN2-672
1864 		 */
1865 		si_srpwr_request(bus->sih, SRPWR_DMN0_PCIE_MASK, 0);
1866 
1867 		/*
1868 		 * WAR to fix ARM cold boot;
1869 		 * asserting the WL domain in DAR helps, but not enum
1870 		 */
1871 		if (bus->sih->buscorerev >= 68) {
1872 			dhd_bus_pcie_pwr_req_wl_domain(bus, TRUE);
1873 		}
1874 	}
1875 
1876 	return 0;
1877 
1878 fail:
1879 	if (bus->sih != NULL) {
1880 		if (MULTIBP_ENAB(bus->sih)) {
1881 			dhd_bus_pcie_pwr_req_clear_nolock(bus);
1882 		}
1883 		/* for EFI even if there is an error, load still succeeds
1884 		* so si_detach should not be called here, it is called during unload
1885 		*/
1886 		si_detach(bus->sih);
1887 		bus->sih = NULL;
1888 	}
1889 	DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__));
1890 	return -1;
1891 }
1892 
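/* Unmask/mask the dongle-to-host mailbox interrupt through the PCI config
 * space interrupt mask register; used for legacy PCIe core revisions (see
 * dhdpcie_bus_intr_enable/disable below).
 */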
1893 int
1894 dhpcie_bus_unmask_interrupt(dhd_bus_t *bus)
1895 {
1896 	dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB);
1897 	return 0;
1898 }
1899 int
1900 dhpcie_bus_mask_interrupt(dhd_bus_t *bus)
1901 {
1902 	dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0);
1903 	return 0;
1904 }
1905 
1906 /* Non atomic function, caller should hold appropriate lock */
1907 void
1908 dhdpcie_bus_intr_enable(dhd_bus_t *bus)
1909 {
1910 	DHD_TRACE(("%s Enter\n", __FUNCTION__));
1911 	if (bus) {
1912 		if (bus->sih && !bus->is_linkdown) {
1913 			/* Skip after receiving D3 ACK */
1914 			if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
1915 				return;
1916 			}
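			/* Legacy PCIe cores (rev 2, 4 and 6) are unmasked through the
			 * PCI config intmask; newer cores use the mailbox intmask
			 * register in the core register space.
			 */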
1917 			if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
1918 				(bus->sih->buscorerev == 4)) {
1919 				dhpcie_bus_unmask_interrupt(bus);
1920 			} else {
1921 	#if defined(BCMINTERNAL) && defined(DHD_DBG_DUMP)
1922 				dhd_bus_mmio_trace(bus, bus->pcie_mailbox_mask,
1923 					bus->def_intmask, TRUE);
1924 	#endif
1925 				si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
1926 					bus->def_intmask, bus->def_intmask);
1927 			}
1928 		}
1929 
1930 	}
1931 
1932 	DHD_TRACE(("%s Exit\n", __FUNCTION__));
1933 }
1934 
1935 /* Non atomic function, caller should hold appropriate lock */
1936 void
1937 dhdpcie_bus_intr_disable(dhd_bus_t *bus)
1938 {
1939 	DHD_TRACE(("%s Enter\n", __FUNCTION__));
1940 	if (bus && bus->sih && !bus->is_linkdown) {
1941 		/* Skip after receiving D3 ACK */
1942 		if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
1943 			return;
1944 		}
1945 		if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
1946 			(bus->sih->buscorerev == 4)) {
1947 			dhpcie_bus_mask_interrupt(bus);
1948 		} else {
1949 			si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_mask,
1950 				bus->def_intmask, 0);
1951 		}
1952 	}
1953 
1954 	DHD_TRACE(("%s Exit\n", __FUNCTION__));
1955 }
1956 
1957 /*
1958  * dhdpcie_advertise_bus_cleanup advertises that cleanup is in progress
1959  * to other bus user contexts like Tx, Rx, IOVAR, WD etc. and waits for those contexts
1960  * to gracefully exit. Before marking busstate as busy, every bus usage context checks
1961  * whether the busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS; if so,
1962  * it exits from there without marking dhd_bus_busy_state as BUSY.
1963  */
1964 void
1965 dhdpcie_advertise_bus_cleanup(dhd_pub_t *dhdp)
1966 {
1967 	unsigned long flags;
1968 	int timeleft;
1969 
1970 #ifdef DHD_PCIE_RUNTIMEPM
1971 	dhdpcie_runtime_bus_wake(dhdp, TRUE, dhdpcie_advertise_bus_cleanup);
1972 #endif /* DHD_PCIE_RUNTIMEPM */
1973 
1974 	dhdp->dhd_watchdog_ms_backup = dhd_watchdog_ms;
1975 	if (dhdp->dhd_watchdog_ms_backup) {
1976 		DHD_ERROR(("%s: Disabling wdtick before dhd deinit\n",
1977 			__FUNCTION__));
1978 		dhd_os_wd_timer(dhdp, 0);
1979 	}
1980 	if (dhdp->busstate != DHD_BUS_DOWN) {
1981 		DHD_GENERAL_LOCK(dhdp, flags);
1982 		dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
1983 		DHD_GENERAL_UNLOCK(dhdp, flags);
1984 	}
1985 
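	/* Wait for all busy contexts to drain; a return value of 0 or 1 from the
	 * wait is treated as a timeout here.
	 */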
1986 	timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
1987 	if ((timeleft == 0) || (timeleft == 1)) {
1988 		DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
1989 				__FUNCTION__, dhdp->dhd_bus_busy_state));
1990 		ASSERT(0);
1991 	}
1992 
1993 	return;
1994 }
1995 
1996 static void
1997 dhdpcie_bus_remove_prep(dhd_bus_t *bus)
1998 {
1999 	unsigned long flags;
2000 	DHD_TRACE(("%s Enter\n", __FUNCTION__));
2001 
2002 	DHD_GENERAL_LOCK(bus->dhd, flags);
2003 	DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
2004 	bus->dhd->busstate = DHD_BUS_DOWN;
2005 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
2006 
2007 	dhd_os_sdlock(bus->dhd);
2008 
2009 	if (bus->sih && !bus->dhd->dongle_isolation) {
2010 		if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev) &&
2011 		    bus->sih->chip != CYW55560_CHIP_ID) {
2012 			dhd_bus_pcie_pwr_req_reload_war(bus);
2013 		}
2014 
2015 		/* WAR for the insmod-fails-after-rmmod issue on Brix Android */
2016 #if !defined(OEM_ANDROID) && !defined(ANDROID)
2017 		/* HW4347-909 */
2018 		if ((bus->sih->buscorerev == 19) || (bus->sih->buscorerev == 23)) {
2019 			/* Set PCIE TRefUp time to 100us for 4347 */
2020 			pcie_set_trefup_time_100us(bus->sih);
2021 		}
2022 
2023 		/* disable fast lpo from 4347 */
2024 		/* For 4378/4387, do not disable fast lpo because we always enable fast lpo.
2025 		 * it causes insmod/rmmod reload failure.
2026 		 */
2027 		if ((PMUREV(bus->sih->pmurev) > 31) &&
2028 		    (bus->sih->buscorerev != 66) &&
2029 		    (bus->sih->buscorerev != 68) &&
2030 		    (bus->sih->buscorerev != 69) &&
2031 		    (bus->sih->buscorerev != 70)) {
2032 			si_pmu_fast_lpo_disable(bus->sih);
2033 		}
2034 #endif /* !OEM_ANDROID && !ANDROID */
2035 
2036 		/* if the pcie link is down, watchdog reset
2037 		* should not be done, as it may hang
2038 		*/
2039 
2040 		if (!bus->is_linkdown) {
2041 #ifndef DHD_SKIP_DONGLE_RESET_IN_ATTACH
2042 			/* for EFI, depending on the BT-over-PCIe mode,
2043 			*  we either power toggle or do an F0 FLR
2044 			*  from dhdpcie_bus_release_dongle(), so there is no need
2045 			*  to do a dongle reset from here
2046 			*/
2047 			dhdpcie_dongle_reset(bus);
2048 #endif /* !DHD_SKIP_DONGLE_RESET_IN_ATTACH */
2049 		}
2050 
2051 		bus->dhd->is_pcie_watchdog_reset = TRUE;
2052 	}
2053 
2054 	dhd_os_sdunlock(bus->dhd);
2055 
2056 	DHD_TRACE(("%s Exit\n", __FUNCTION__));
2057 }
2058 
2059 void
2060 dhd_init_bus_lock(dhd_bus_t *bus)
2061 {
2062 	if (!bus->bus_lock) {
2063 		bus->bus_lock = dhd_os_spin_lock_init(bus->dhd->osh);
2064 	}
2065 }
2066 
2067 void
2068 dhd_deinit_bus_lock(dhd_bus_t *bus)
2069 {
2070 	if (bus->bus_lock) {
2071 		dhd_os_spin_lock_deinit(bus->dhd->osh, bus->bus_lock);
2072 		bus->bus_lock = NULL;
2073 	}
2074 }
2075 
2076 void
2077 dhd_init_backplane_access_lock(dhd_bus_t *bus)
2078 {
2079 	if (!bus->backplane_access_lock) {
2080 		bus->backplane_access_lock = dhd_os_spin_lock_init(bus->dhd->osh);
2081 	}
2082 }
2083 
2084 void
2085 dhd_deinit_backplane_access_lock(dhd_bus_t *bus)
2086 {
2087 	if (bus->backplane_access_lock) {
2088 		dhd_os_spin_lock_deinit(bus->dhd->osh, bus->backplane_access_lock);
2089 		bus->backplane_access_lock = NULL;
2090 	}
2091 }
2092 
2093 /** Detach and free everything */
2094 void
2095 dhdpcie_bus_release(dhd_bus_t *bus)
2096 {
2097 	bool dongle_isolation = FALSE;
2098 #ifdef BCMQT
2099 	uint buscorerev = 0;
2100 #endif /* BCMQT */
2101 	osl_t *osh = NULL;
2102 	unsigned long flags_bus;
2103 
2104 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2105 
2106 	if (bus) {
2107 
2108 		osh = bus->osh;
2109 		ASSERT(osh);
2110 
2111 		if (bus->dhd) {
2112 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
2113 			debugger_close();
2114 #endif /* DEBUGGER || DHD_DSCOPE */
2115 			dhdpcie_advertise_bus_cleanup(bus->dhd);
2116 			dongle_isolation = bus->dhd->dongle_isolation;
2117 			bus->dhd->is_pcie_watchdog_reset = FALSE;
2118 			dhdpcie_bus_remove_prep(bus);
2119 
2120 			if (bus->intr) {
2121 				DHD_BUS_LOCK(bus->bus_lock, flags_bus);
2122 				dhdpcie_bus_intr_disable(bus);
2123 				DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
2124 				dhdpcie_free_irq(bus);
2125 			}
2126 			dhd_deinit_bus_lock(bus);
2127 			dhd_deinit_backplane_access_lock(bus);
2128 #ifdef BCMQT
2129 			if (IDMA_ACTIVE(bus->dhd)) {
2130 			/**
2131 			 * On FPGA, during the exit path, force-set the "IDMA Control Register"
2132 			 * to its default value 0x0. Otherwise host-dongle sync for IDMA fails
2133 			 * during the next IDMA initialization (without a system reboot)
2134 			 */
2135 				buscorerev = bus->sih->buscorerev;
2136 				si_corereg(bus->sih, bus->sih->buscoreidx,
2137 					IDMAControl(buscorerev), ~0, 0);
2138 			}
2139 #endif /* BCMQT */
2140 			/**
2141 			 * dhdpcie_bus_release_dongle frees the bus->sih handle, which is
2142 			 * needed to access dongle registers.
2143 			 * dhd_detach will communicate with the dongle to delete flowrings etc.,
2144 			 * so dhdpcie_bus_release_dongle must be called only after dhd_detach.
2145 			 */
2146 			dhd_detach(bus->dhd);
2147 			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
2148 			dhd_free(bus->dhd);
2149 			bus->dhd = NULL;
2150 		}
2151 		/* unmap the regs and tcm here!! */
2152 		if (bus->regs) {
2153 			dhdpcie_bus_reg_unmap(osh, bus->regs, DONGLE_REG_MAP_SIZE);
2154 			bus->regs = NULL;
2155 		}
2156 		if (bus->tcm) {
2157 			dhdpcie_bus_reg_unmap(osh, bus->tcm, DONGLE_TCM_MAP_SIZE);
2158 			bus->tcm = NULL;
2159 		}
2160 
2161 		dhdpcie_bus_release_malloc(bus, osh);
2162 		/* Detach pcie shared structure */
2163 		if (bus->pcie_sh) {
2164 			MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
2165 			bus->pcie_sh = NULL;
2166 		}
2167 
2168 		if (bus->console.buf != NULL) {
2169 			MFREE(osh, bus->console.buf, bus->console.bufsize);
2170 		}
2171 
2172 		/* Finally free bus info */
2173 		MFREE(osh, bus, sizeof(dhd_bus_t));
2174 
2175 		g_dhd_bus = NULL;
2176 	}
2177 
2178 	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
2179 } /* dhdpcie_bus_release */
2180 
2181 void
2182 dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
2183 {
2184 	DHD_TRACE(("%s: Enter bus->dhd %p dongle_reset %d\n", __FUNCTION__,
2185 		bus->dhd, bus->dhd ? bus->dhd->dongle_reset : 0));
2186 
2187 	if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) {
2188 		DHD_TRACE(("%s Exit\n", __FUNCTION__));
2189 		return;
2190 	}
2191 
2192 	if (bus->is_linkdown) {
2193 		DHD_ERROR(("%s : Skip release dongle due to linkdown \n", __FUNCTION__));
2194 		return;
2195 	}
2196 
2197 	if (bus->sih) {
2198 
2199 		if (!dongle_isolation &&
2200 			(bus->dhd && !bus->dhd->is_pcie_watchdog_reset)) {
2201 			dhdpcie_dongle_reset(bus);
2202 		}
2203 
2204 		dhdpcie_dongle_flr_or_pwr_toggle(bus);
2205 
2206 		if (bus->ltrsleep_on_unload) {
2207 			si_corereg(bus->sih, bus->sih->buscoreidx,
2208 				OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
2209 		}
2210 
2211 		if (bus->sih->buscorerev == 13)
2212 			 pcie_serdes_iddqdisable(bus->osh, bus->sih,
2213 			                         (sbpcieregs_t *) bus->regs);
2214 
2215 		/* For inbuilt drivers pcie clk req will be done by RC,
2216 		 * so do not do clkreq from dhd
2217 		 */
2218 		if (dhd_download_fw_on_driverload)
2219 		{
2220 			/* Disable CLKREQ# */
2221 			dhdpcie_clkreq(bus->osh, 1, 0);
2222 		}
2223 
2224 #ifdef PCIE_SUSPEND_DURING_DETACH
2225 		dhdpcie_bus_clock_stop(bus);
2226 #endif /* PCIE_SUSPEND_DURING_DETACH */
2227 
2228 		if (bus->sih != NULL) {
2229 			si_detach(bus->sih);
2230 			bus->sih = NULL;
2231 		}
2232 		if (bus->vars && bus->varsz)
2233 			MFREE(osh, bus->vars, bus->varsz);
2234 		bus->vars = NULL;
2235 	}
2236 
2237 	DHD_TRACE(("%s Exit\n", __FUNCTION__));
2238 }
2239 
2240 uint32
2241 dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size)
2242 {
2243 	uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size);
2244 	return data;
2245 }
2246 
2247 /** 32 bit config write */
2248 void
2249 dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data)
2250 {
2251 	OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data);
2252 }
2253 
2254 void
2255 dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data)
2256 {
2257 	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data);
2258 }
2259 
2260 void
2261 dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
2262 {
2263 	int32 min_size =  DONGLE_MIN_MEMSIZE;
2264 	/* Restrict the memsize to user specified limit */
2265 	DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
2266 		dhd_dongle_memsize, min_size));
2267 	if ((dhd_dongle_memsize > min_size) &&
2268 		(dhd_dongle_memsize < (int32)bus->orig_ramsize))
2269 		bus->ramsize = dhd_dongle_memsize;
2270 }
2271 
2272 void
2273 dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh)
2274 {
2275 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2276 
2277 	if (bus->dhd && bus->dhd->dongle_reset)
2278 		return;
2279 
2280 	if (bus->vars && bus->varsz) {
2281 		MFREE(osh, bus->vars, bus->varsz);
2282 		bus->vars = NULL;
2283 	}
2284 
2285 	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
2286 	return;
2287 
2288 }
2289 
2290 /** Stop bus module: clear pending frames, disable data flow */
2291 void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
2292 {
2293 	unsigned long flags, flags_bus;
2294 
2295 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2296 
2297 	if (!bus->dhd)
2298 		return;
2299 
2300 	if (bus->dhd->busstate == DHD_BUS_DOWN) {
2301 		DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__));
2302 		goto done;
2303 	}
2304 
2305 	DHD_DISABLE_RUNTIME_PM(bus->dhd);
2306 
2307 	DHD_GENERAL_LOCK(bus->dhd, flags);
2308 	DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
2309 	bus->dhd->busstate = DHD_BUS_DOWN;
2310 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
2311 
2312 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2313 	atomic_set(&bus->dhd->block_bus, TRUE);
2314 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2315 
2316 	DHD_BUS_LOCK(bus->bus_lock, flags_bus);
2317 	dhdpcie_bus_intr_disable(bus);
2318 	DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
2319 
2320 	if (!bus->is_linkdown) {
2321 		uint32 status;
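		/* ack any pending interrupt status by writing the read value back
		 * (write-1-to-clear semantics assumed)
		 */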
2322 		status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
2323 		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
2324 	}
2325 
2326 	if (!dhd_download_fw_on_driverload) {
2327 		dhd_dpc_kill(bus->dhd);
2328 	}
2329 
2330 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2331 	pm_runtime_disable(dhd_bus_to_dev(bus));
2332 	pm_runtime_set_suspended(dhd_bus_to_dev(bus));
2333 	pm_runtime_enable(dhd_bus_to_dev(bus));
2334 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2335 
2336 	/* Clear rx control and wake any waiters */
2337 	dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT);
2338 	dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP);
2339 
2340 done:
2341 	return;
2342 }
2343 
2344 /**
2345  * Watchdog timer function.
2346  * @param dhd   Represents a specific hardware (dongle) instance that this DHD manages
2347  */
2348 bool dhd_bus_watchdog(dhd_pub_t *dhd)
2349 {
2350 	unsigned long flags;
2351 	dhd_bus_t *bus = dhd->bus;
2352 
2353 	DHD_GENERAL_LOCK(dhd, flags);
2354 	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) ||
2355 			DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd)) {
2356 		DHD_GENERAL_UNLOCK(dhd, flags);
2357 		return FALSE;
2358 	}
2359 	DHD_BUS_BUSY_SET_IN_WD(dhd);
2360 	DHD_GENERAL_UNLOCK(dhd, flags);
2361 
2362 #ifdef DHD_PCIE_RUNTIMEPM
2363 	dhdpcie_runtime_bus_wake(dhd, TRUE, __builtin_return_address(0));
2364 #endif /* DHD_PCIE_RUNTIMEPM */
2365 
2366 	/* Poll for console output periodically */
2367 	if (dhd->busstate == DHD_BUS_DATA &&
2368 		dhd->dhd_console_ms != 0 &&
2369 		bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE) {
2370 		bus->console.count += dhd_watchdog_ms;
2371 		if (bus->console.count >= dhd->dhd_console_ms) {
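			/* subtract rather than reset so the remainder carries over
			 * and the poll cadence stays accurate
			 */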
2372 			bus->console.count -= dhd->dhd_console_ms;
2373 
2374 			if (MULTIBP_ENAB(bus->sih)) {
2375 				dhd_bus_pcie_pwr_req(bus);
2376 			}
2377 
2378 			/* Make sure backplane clock is on */
2379 			if (dhdpcie_bus_readconsole(bus) < 0) {
2380 				dhd->dhd_console_ms = 0; /* On error, stop trying */
2381 			}
2382 
2383 			if (MULTIBP_ENAB(bus->sih)) {
2384 				dhd_bus_pcie_pwr_req_clear(bus);
2385 			}
2386 		}
2387 	}
2388 
2389 	DHD_GENERAL_LOCK(dhd, flags);
2390 	DHD_BUS_BUSY_CLEAR_IN_WD(dhd);
2391 	dhd_os_busbusy_wake(dhd);
2392 	DHD_GENERAL_UNLOCK(dhd, flags);
2393 
2394 	return TRUE;
2395 } /* dhd_bus_watchdog */
2396 
2397 #if defined(SUPPORT_MULTIPLE_REVISION)
2398 static int concate_revision_bcm4358(dhd_bus_t *bus, char *fw_path, char *nv_path)
2399 {
2400 	uint32 chiprev;
2401 #if defined(SUPPORT_MULTIPLE_CHIPS)
2402 	char chipver_tag[20] = "_4358";
2403 #else
2404 	char chipver_tag[10] = {0, };
2405 #endif /* SUPPORT_MULTIPLE_CHIPS */
2406 
2407 	chiprev = dhd_bus_chiprev(bus);
2408 	if (chiprev == 0) {
2409 		DHD_ERROR(("----- CHIP 4358 A0 -----\n"));
2410 		strcat(chipver_tag, "_a0");
2411 	} else if (chiprev == 1) {
2412 		DHD_ERROR(("----- CHIP 4358 A1 -----\n"));
2413 #if defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS)
2414 		strcat(chipver_tag, "_a1");
2415 #endif /* defined(SUPPORT_MULTIPLE_CHIPS) || defined(SUPPORT_MULTIPLE_MODULE_CIS) */
2416 	} else if (chiprev == 3) {
2417 		DHD_ERROR(("----- CHIP 4358 A3 -----\n"));
2418 #if defined(SUPPORT_MULTIPLE_CHIPS)
2419 		strcat(chipver_tag, "_a3");
2420 #endif /* SUPPORT_MULTIPLE_CHIPS */
2421 	} else {
2422 		DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chiprev));
2423 	}
2424 
2425 	strcat(fw_path, chipver_tag);
2426 
2427 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK)
2428 	if (chiprev == 1 || chiprev == 3) {
2429 		int ret = dhd_check_module_b85a();
2430 		if ((chiprev == 1) && (ret < 0)) {
2431 			memset(chipver_tag, 0x00, sizeof(chipver_tag));
2432 			strcat(chipver_tag, "_b85");
2433 			strcat(chipver_tag, "_a1");
2434 		}
2435 	}
2436 
2437 	DHD_ERROR(("%s: chipver_tag %s \n", __FUNCTION__, chipver_tag));
2438 #endif /* defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) */
2439 
2440 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
2441 	if (system_rev >= 10) {
2442 		DHD_ERROR(("----- Board Rev  [%d]-----\n", system_rev));
2443 		strcat(chipver_tag, "_r10");
2444 	}
2445 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
2446 	strcat(nv_path, chipver_tag);
2447 
2448 	return 0;
2449 }
2450 
2451 static int concate_revision_bcm4359(dhd_bus_t *bus, char *fw_path, char *nv_path)
2452 {
2453 	uint32 chip_ver;
2454 	char chipver_tag[10] = {0, };
2455 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2456 	defined(SUPPORT_BCM4359_MIXED_MODULES)
2457 	int module_type = -1;
2458 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2459 
2460 	chip_ver = bus->sih->chiprev;
2461 	if (chip_ver == 4) {
2462 		DHD_ERROR(("----- CHIP 4359 B0 -----\n"));
2463 		strncat(chipver_tag, "_b0", strlen("_b0"));
2464 	} else if (chip_ver == 5) {
2465 		DHD_ERROR(("----- CHIP 4359 B1 -----\n"));
2466 		strncat(chipver_tag, "_b1", strlen("_b1"));
2467 	} else if (chip_ver == 9) {
2468 		DHD_ERROR(("----- CHIP 4359 C0 -----\n"));
2469 		strncat(chipver_tag, "_c0", strlen("_c0"));
2470 	} else {
2471 		DHD_ERROR(("----- Unknown chip version, ver=%x -----\n", chip_ver));
2472 		return -1;
2473 	}
2474 
2475 #if defined(SUPPORT_MULTIPLE_MODULE_CIS) && defined(USE_CID_CHECK) && \
2476 	defined(SUPPORT_BCM4359_MIXED_MODULES)
2477 	module_type =  dhd_check_module_b90();
2478 
2479 	switch (module_type) {
2480 		case BCM4359_MODULE_TYPE_B90B:
2481 			strcat(fw_path, chipver_tag);
2482 			break;
2483 		case BCM4359_MODULE_TYPE_B90S:
2484 		default:
2485 			/*
2486 			 * If the .cid.info file does not exist,
2487 			 * force-load the B90S FW for the initial MFG boot-up.
2488 			 */
2489 			if (chip_ver == 5) {
2490 				strncat(fw_path, "_b90s", strlen("_b90s"));
2491 			}
2492 			strcat(fw_path, chipver_tag);
2493 			strcat(nv_path, chipver_tag);
2494 			break;
2495 	}
2496 #else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2497 	strcat(fw_path, chipver_tag);
2498 	strcat(nv_path, chipver_tag);
2499 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK && SUPPORT_BCM4359_MIXED_MODULES */
2500 
2501 	return 0;
2502 }
2503 
2504 #if defined(USE_CID_CHECK)
2505 
2506 #define MAX_EXTENSION 20
2507 #define MODULE_BCM4361_INDEX	3
2508 #define CHIP_REV_A0	1
2509 #define CHIP_REV_A1	2
2510 #define CHIP_REV_B0	3
2511 #define CHIP_REV_B1	4
2512 #define CHIP_REV_B2	5
2513 #define CHIP_REV_C0	6
2514 #define BOARD_TYPE_EPA				0x080f
2515 #define BOARD_TYPE_IPA				0x0827
2516 #define BOARD_TYPE_IPA_OLD			0x081a
2517 #define DEFAULT_CIDINFO_FOR_EPA		"r00a_e000_a0_ePA"
2518 #define DEFAULT_CIDINFO_FOR_IPA		"r00a_e000_a0_iPA"
2519 #define DEFAULT_CIDINFO_FOR_A1		"r01a_e30a_a1"
2520 #define DEFAULT_CIDINFO_FOR_B0		"r01i_e32_b0"
2521 #define MAX_VID_LEN					8
2522 #define CIS_TUPLE_HDR_LEN		2
2523 #if defined(BCM4361_CHIP)
2524 #define CIS_TUPLE_START_ADDRESS		0x18011110
2525 #define CIS_TUPLE_END_ADDRESS		0x18011167
2526 #elif defined(BCM4375_CHIP)
2527 #define CIS_TUPLE_START_ADDRESS		0x18011120
2528 #define CIS_TUPLE_END_ADDRESS		0x18011177
2529 #endif /* defined(BCM4361_CHIP) */
2530 #define CIS_TUPLE_MAX_COUNT		(uint32)((CIS_TUPLE_END_ADDRESS - CIS_TUPLE_START_ADDRESS\
2531 						+ 1) / sizeof(uint32))
2532 #define CIS_TUPLE_TAG_START			0x80
2533 #define CIS_TUPLE_TAG_VENDOR		0x81
2534 #define CIS_TUPLE_TAG_BOARDTYPE		0x1b
2535 #define CIS_TUPLE_TAG_LENGTH		1
2536 #define NVRAM_FEM_MURATA			"_murata"
2537 #define CID_FEM_MURATA				"_mur_"
2538 
2539 typedef struct cis_tuple_format {
2540 	uint8	id;
2541 	uint8	len;	/* total length of tag and data */
2542 	uint8	tag;
2543 	uint8	data[1];
2544 } cis_tuple_format_t;
2545 
2546 typedef struct {
2547 	char cid_ext[MAX_EXTENSION];
2548 	char nvram_ext[MAX_EXTENSION];
2549 	char fw_ext[MAX_EXTENSION];
2550 } naming_info_t;
2551 
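/* Each row maps a module identifier read from OTP/CIS (cid_ext) to the
 * suffixes appended to the NVRAM (nvram_ext) and firmware (fw_ext) file names.
 */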
2552 naming_info_t bcm4361_naming_table[] = {
2553 	{ {""}, {""}, {""} },
2554 	{ {"r00a_e000_a0_ePA"}, {"_a0_ePA"}, {"_a0_ePA"} },
2555 	{ {"r00a_e000_a0_iPA"}, {"_a0"}, {"_a1"} },
2556 	{ {"r01a_e30a_a1"}, {"_r01a_a1"}, {"_a1"} },
2557 	{ {"r02a_e30a_a1"}, {"_r02a_a1"}, {"_a1"} },
2558 	{ {"r02c_e30a_a1"}, {"_r02c_a1"}, {"_a1"} },
2559 	{ {"r01d_e31_b0"}, {"_r01d_b0"}, {"_b0"} },
2560 	{ {"r01f_e31_b0"}, {"_r01f_b0"}, {"_b0"} },
2561 	{ {"r02g_e31_b0"}, {"_r02g_b0"}, {"_b0"} },
2562 	{ {"r01h_e32_b0"}, {"_r01h_b0"}, {"_b0"} },
2563 	{ {"r01i_e32_b0"}, {"_r01i_b0"}, {"_b0"} },
2564 	{ {"r02j_e32_b0"}, {"_r02j_b0"}, {"_b0"} },
2565 	{ {"r012_1kl_a1"}, {"_r012_a1"}, {"_a1"} },
2566 	{ {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
2567 	{ {"r013_1kl_b0"}, {"_r013_b0"}, {"_b0"} },
2568 	{ {"r014_1kl_b0"}, {"_r014_b0"}, {"_b0"} },
2569 	{ {"r015_1kl_b0"}, {"_r015_b0"}, {"_b0"} },
2570 	{ {"r020_1kl_b0"}, {"_r020_b0"}, {"_b0"} },
2571 	{ {"r021_1kl_b0"}, {"_r021_b0"}, {"_b0"} },
2572 	{ {"r022_1kl_b0"}, {"_r022_b0"}, {"_b0"} },
2573 	{ {"r023_1kl_b0"}, {"_r023_b0"}, {"_b0"} },
2574 	{ {"r024_1kl_b0"}, {"_r024_b0"}, {"_b0"} },
2575 	{ {"r030_1kl_b0"}, {"_r030_b0"}, {"_b0"} },
2576 	{ {"r031_1kl_b0"}, {"_r030_b0"}, {"_b0"} },	/* exceptional case : r31 -> r30 */
2577 	{ {"r032_1kl_b0"}, {"_r032_b0"}, {"_b0"} },
2578 	{ {"r033_1kl_b0"}, {"_r033_b0"}, {"_b0"} },
2579 	{ {"r034_1kl_b0"}, {"_r034_b0"}, {"_b0"} },
2580 	{ {"r02a_e32a_b2"}, {"_r02a_b2"}, {"_b2"} },
2581 	{ {"r02b_e32a_b2"}, {"_r02b_b2"}, {"_b2"} },
2582 	{ {"r020_1qw_b2"}, {"_r020_b2"}, {"_b2"} },
2583 	{ {"r021_1qw_b2"}, {"_r021_b2"}, {"_b2"} },
2584 	{ {"r022_1qw_b2"}, {"_r022_b2"}, {"_b2"} },
2585 	{ {"r031_1qw_b2"}, {"_r031_b2"}, {"_b2"} },
2586 	{ {"r032_1qw_b2"}, {"_r032_b2"}, {"_b2"} },
2587 	{ {"r041_1qw_b2"}, {"_r041_b2"}, {"_b2"} }
2588 };
2589 
2590 #define MODULE_BCM4375_INDEX	3
2591 
2592 naming_info_t bcm4375_naming_table[] = {
2593 	{ {""}, {""}, {""} },
2594 	{ {"e41_es11"}, {"_ES00_semco_b0"}, {"_b0"} },
2595 	{ {"e43_es33"}, {"_ES01_semco_b0"}, {"_b0"} },
2596 	{ {"e43_es34"}, {"_ES02_semco_b0"}, {"_b0"} },
2597 	{ {"e43_es35"}, {"_ES02_semco_b0"}, {"_b0"} },
2598 	{ {"e43_es36"}, {"_ES03_semco_b0"}, {"_b0"} },
2599 	{ {"e43_cs41"}, {"_CS00_semco_b1"}, {"_b1"} },
2600 	{ {"e43_cs51"}, {"_CS01_semco_b1"}, {"_b1"} },
2601 	{ {"e43_cs53"}, {"_CS01_semco_b1"}, {"_b1"} },
2602 	{ {"e43_cs61"}, {"_CS00_skyworks_b1"}, {"_b1"} },
2603 	{ {"1rh_es10"}, {"_1rh_es10_b0"}, {"_b0"} },
2604 	{ {"1rh_es11"}, {"_1rh_es11_b0"}, {"_b0"} },
2605 	{ {"1rh_es12"}, {"_1rh_es12_b0"}, {"_b0"} },
2606 	{ {"1rh_es13"}, {"_1rh_es13_b0"}, {"_b0"} },
2607 	{ {"1rh_es20"}, {"_1rh_es20_b0"}, {"_b0"} },
2608 	{ {"1rh_es32"}, {"_1rh_es32_b0"}, {"_b0"} },
2609 	{ {"1rh_es41"}, {"_1rh_es41_b1"}, {"_b1"} },
2610 	{ {"1rh_es42"}, {"_1rh_es42_b1"}, {"_b1"} },
2611 	{ {"1rh_es43"}, {"_1rh_es43_b1"}, {"_b1"} },
2612 	{ {"1rh_es44"}, {"_1rh_es44_b1"}, {"_b1"} }
2613 };
2614 
2615 static naming_info_t *
2616 dhd_find_naming_info(naming_info_t table[], int table_size, char *module_type)
2617 {
2618 	int index_found = 0, i = 0;
2619 
2620 	if (module_type && strlen(module_type) > 0) {
2621 		for (i = 1; i < table_size; i++) {
2622 			if (!strncmp(table[i].cid_ext, module_type, strlen(table[i].cid_ext))) {
2623 				index_found = i;
2624 				break;
2625 			}
2626 		}
2627 	}
2628 
2629 	DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));
2630 
2631 	return &table[index_found];
2632 }
2633 
2634 static naming_info_t *
2635 dhd_find_naming_info_by_cid(naming_info_t table[], int table_size,
2636 	char *cid_info)
2637 {
2638 	int index_found = 0, i = 0;
2639 	char *ptr;
2640 
2641 	/* truncate extension */
2642 	for (i = 1, ptr = cid_info; i < MODULE_BCM4361_INDEX && ptr; i++) {
2643 		ptr = bcmstrstr(ptr, "_");
2644 		if (ptr) {
2645 			ptr++;
2646 		}
2647 	}
2648 
2649 	for (i = 1; i < table_size && ptr; i++) {
2650 		if (!strncmp(table[i].cid_ext, ptr, strlen(table[i].cid_ext))) {
2651 			index_found = i;
2652 			break;
2653 		}
2654 	}
2655 
2656 	DHD_INFO(("%s: index_found=%d\n", __FUNCTION__, index_found));
2657 
2658 	return &table[index_found];
2659 }
2660 
2661 static int
2662 dhd_parse_board_information_bcm(dhd_bus_t *bus, int *boardtype,
2663 	unsigned char *vid, int *vid_length)
2664 {
2665 	int boardtype_backplane_addr[] = {
2666 		0x18010324, /* OTP Control 1 */
2667 		0x18012618, /* PMU min resource mask */
2668 	};
2669 	int boardtype_backplane_data[] = {
2670 		0x00fa0000,
2671 		0x0e4fffff /* Keep on ARMHTAVAIL */
2672 	};
2673 	int int_val = 0, i = 0;
2674 	cis_tuple_format_t *tuple;
2675 	int totlen, len;
2676 	uint32 raw_data[CIS_TUPLE_MAX_COUNT];
2677 
2678 	for (i = 0; i < ARRAYSIZE(boardtype_backplane_addr); i++) {
2679 		/* Write new OTP and PMU configuration */
2680 		if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int),
2681 				&boardtype_backplane_data[i], FALSE) != BCME_OK) {
2682 			DHD_ERROR(("invalid size/addr combination\n"));
2683 			return BCME_ERROR;
2684 		}
2685 
2686 		if (si_backplane_access(bus->sih, boardtype_backplane_addr[i], sizeof(int),
2687 				&int_val, TRUE) != BCME_OK) {
2688 			DHD_ERROR(("invalid size/addr combination\n"));
2689 			return BCME_ERROR;
2690 		}
2691 
2692 		DHD_INFO(("%s: boardtype_backplane_addr 0x%08x rdata 0x%04x\n",
2693 			__FUNCTION__, boardtype_backplane_addr[i], int_val));
2694 	}
2695 
2696 	/* read tuple raw data */
2697 	for (i = 0; i < CIS_TUPLE_MAX_COUNT; i++) {
2698 		if (si_backplane_access(bus->sih, CIS_TUPLE_START_ADDRESS + i * sizeof(uint32),
2699 				sizeof(uint32),	&raw_data[i], TRUE) != BCME_OK) {
2700 			break;
2701 		}
2702 	}
2703 
2704 	totlen = i * sizeof(uint32);
2705 	tuple = (cis_tuple_format_t *)raw_data;
2706 
2707 	/* check the first tuple has tag 'start' */
2708 	if (tuple->id != CIS_TUPLE_TAG_START) {
2709 		return BCME_ERROR;
2710 	}
2711 
2712 	*vid_length = *boardtype = 0;
2713 
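	/* Tuples are laid out as <id><len><tag><data...>, with len covering the
	 * tag byte plus the data; walk them until both the vendor ID and the
	 * board type have been found or the raw data runs out.
	 */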
2714 	/* find tagged parameter */
2715 	while ((totlen >= (tuple->len + CIS_TUPLE_HDR_LEN)) &&
2716 			(*vid_length == 0 || *boardtype == 0)) {
2717 		len = tuple->len;
2718 
2719 		if ((tuple->tag == CIS_TUPLE_TAG_VENDOR) &&
2720 				(totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
2721 			/* found VID */
2722 			memcpy(vid, tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2723 			*vid_length = tuple->len - CIS_TUPLE_TAG_LENGTH;
2724 			prhex("OTP VID", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2725 		}
2726 		else if ((tuple->tag == CIS_TUPLE_TAG_BOARDTYPE) &&
2727 				(totlen >= (int)(len + CIS_TUPLE_HDR_LEN))) {
2728 			/* found boardtype */
2729 			*boardtype = (int)tuple->data[0];
2730 			prhex("OTP boardtype", tuple->data, tuple->len - CIS_TUPLE_TAG_LENGTH);
2731 		}
2732 
2733 		tuple = (cis_tuple_format_t*)((uint8*)tuple + (len + CIS_TUPLE_HDR_LEN));
2734 		totlen -= (len + CIS_TUPLE_HDR_LEN);
2735 	}
2736 
2737 	if (*vid_length <= 0 || *boardtype <= 0) {
2738 		DHD_ERROR(("failed to parse information (vid=%d, boardtype=%d)\n",
2739 			*vid_length, *boardtype));
2740 		return BCME_ERROR;
2741 	}
2742 
2743 	return BCME_OK;
2744 
2745 }
2746 
2747 static naming_info_t *
2748 dhd_find_naming_info_by_chip_rev(naming_info_t table[], int table_size,
2749 	dhd_bus_t *bus, bool *is_murata_fem)
2750 {
2751 	int board_type = 0, chip_rev = 0, vid_length = 0;
2752 	unsigned char vid[MAX_VID_LEN];
2753 	naming_info_t *info = &table[0];
2754 	char *cid_info = NULL;
2755 
2756 	if (!bus || !bus->sih) {
2757 		DHD_ERROR(("%s:bus(%p) or bus->sih is NULL\n", __FUNCTION__, bus));
2758 		return NULL;
2759 	}
2760 	chip_rev = bus->sih->chiprev;
2761 
2762 	if (dhd_parse_board_information_bcm(bus, &board_type, vid, &vid_length)
2763 			!= BCME_OK) {
2764 		DHD_ERROR(("%s:failed to parse board information\n", __FUNCTION__));
2765 		return NULL;
2766 	}
2767 
2768 	DHD_INFO(("%s:chip version %d\n", __FUNCTION__, chip_rev));
2769 
2770 #if defined(BCM4361_CHIP)
2771 	/* A0 chipset has exception only */
2772 	if (chip_rev == CHIP_REV_A0) {
2773 		if (board_type == BOARD_TYPE_EPA) {
2774 			info = dhd_find_naming_info(table, table_size,
2775 				DEFAULT_CIDINFO_FOR_EPA);
2776 		} else if ((board_type == BOARD_TYPE_IPA) ||
2777 				(board_type == BOARD_TYPE_IPA_OLD)) {
2778 			info = dhd_find_naming_info(table, table_size,
2779 				DEFAULT_CIDINFO_FOR_IPA);
2780 		}
2781 	} else {
2782 		cid_info = dhd_get_cid_info(vid, vid_length);
2783 		if (cid_info) {
2784 			info = dhd_find_naming_info_by_cid(table, table_size, cid_info);
2785 			if (strstr(cid_info, CID_FEM_MURATA)) {
2786 				*is_murata_fem = TRUE;
2787 			}
2788 		}
2789 	}
2790 #else
2791 	cid_info = dhd_get_cid_info(vid, vid_length);
2792 	if (cid_info) {
2793 		info = dhd_find_naming_info_by_cid(table, table_size, cid_info);
2794 		if (strstr(cid_info, CID_FEM_MURATA)) {
2795 			*is_murata_fem = TRUE;
2796 		}
2797 	}
2798 #endif /* BCM4361_CHIP */
2799 
2800 	return info;
2801 }
2802 #endif /* USE_CID_CHECK */
2803 
2804 static int
2805 concate_revision_bcm4361(dhd_bus_t *bus, char *fw_path, char *nv_path)
2806 {
2807 	int ret = BCME_OK;
2808 #if defined(SUPPORT_BCM4361_MIXED_MODULES) && defined(USE_CID_CHECK)
2809 	char module_type[MAX_VNAME_LEN];
2810 	naming_info_t *info = NULL;
2811 	bool is_murata_fem = FALSE;
2812 
2813 	memset(module_type, 0, sizeof(module_type));
2814 
2815 	if (dhd_check_module_bcm(module_type,
2816 			MODULE_BCM4361_INDEX, &is_murata_fem) == BCME_OK) {
2817 		info = dhd_find_naming_info(bcm4361_naming_table,
2818 			ARRAYSIZE(bcm4361_naming_table), module_type);
2819 	} else {
2820 		/* in case the .cid.info file does not exist */
2821 		info = dhd_find_naming_info_by_chip_rev(bcm4361_naming_table,
2822 			ARRAYSIZE(bcm4361_naming_table), bus, &is_murata_fem);
2823 	}
2824 
2825 	if (bcmstrnstr(nv_path, PATH_MAX, "_murata", 7)) {
2826 		is_murata_fem = FALSE;
2827 	}
2828 
2829 	if (info) {
2830 		if (is_murata_fem) {
2831 			strncat(nv_path, NVRAM_FEM_MURATA, strlen(NVRAM_FEM_MURATA));
2832 		}
2833 		strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
2834 		strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
2835 	} else {
2836 		DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__));
2837 		ret = BCME_ERROR;
2838 	}
2839 #else /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
2840 	char chipver_tag[10] = {0, };
2841 
2842 	strcat(fw_path, chipver_tag);
2843 	strcat(nv_path, chipver_tag);
2844 #endif /* SUPPORT_MULTIPLE_MODULE_CIS && USE_CID_CHECK */
2845 
2846 	return ret;
2847 }
2848 
2849 static int
2850 concate_revision_bcm4375(dhd_bus_t *bus, char *fw_path, char *nv_path)
2851 {
2852 	int ret = BCME_OK;
2853 #if defined(SUPPORT_BCM4375_MIXED_MODULES) && defined(USE_CID_CHECK)
2854 	char module_type[MAX_VNAME_LEN];
2855 	naming_info_t *info = NULL;
2856 	bool is_murata_fem = FALSE;
2857 
2858 	memset(module_type, 0, sizeof(module_type));
2859 
2860 	if (dhd_check_module_bcm(module_type,
2861 			MODULE_BCM4375_INDEX, &is_murata_fem) == BCME_OK) {
2862 		info = dhd_find_naming_info(bcm4375_naming_table,
2863 				ARRAYSIZE(bcm4375_naming_table), module_type);
2864 	} else {
2865 		/* in case the .cid.info file does not exist */
2866 		info = dhd_find_naming_info_by_chip_rev(bcm4375_naming_table,
2867 				ARRAYSIZE(bcm4375_naming_table), bus, &is_murata_fem);
2868 	}
2869 
2870 	if (info) {
2871 		strncat(nv_path, info->nvram_ext, strlen(info->nvram_ext));
2872 		strncat(fw_path, info->fw_ext, strlen(info->fw_ext));
2873 	} else {
2874 		DHD_ERROR(("%s:failed to find extension for nvram and firmware\n", __FUNCTION__));
2875 		ret = BCME_ERROR;
2876 	}
2877 #else /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */
2878 	char chipver_tag[10] = {0, };
2879 
2880 	strcat(fw_path, chipver_tag);
2881 	strcat(nv_path, chipver_tag);
2882 #endif /* SUPPORT_BCM4375_MIXED_MODULES && USE_CID_CHECK */
2883 
2884 	return ret;
2885 }
2886 
2887 int
2888 concate_revision(dhd_bus_t *bus, char *fw_path, char *nv_path)
2889 {
2890 	int res = 0;
2891 
2892 	if (!bus || !bus->sih) {
2893 		DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
2894 		return -1;
2895 	}
2896 
2897 	if (!fw_path || !nv_path) {
2898 		DHD_ERROR(("fw_path or nv_path is null.\n"));
2899 		return res;
2900 	}
2901 
2902 	switch (si_chipid(bus->sih)) {
2903 
2904 	case BCM43569_CHIP_ID:
2905 	case BCM4358_CHIP_ID:
2906 		res = concate_revision_bcm4358(bus, fw_path, nv_path);
2907 		break;
2908 	case BCM4355_CHIP_ID:
2909 	case BCM4359_CHIP_ID:
2910 		res = concate_revision_bcm4359(bus, fw_path, nv_path);
2911 		break;
2912 	case BCM4361_CHIP_ID:
2913 	case BCM4347_CHIP_ID:
2914 		res = concate_revision_bcm4361(bus, fw_path, nv_path);
2915 		break;
2916 	case BCM4375_CHIP_ID:
2917 		res = concate_revision_bcm4375(bus, fw_path, nv_path);
2918 		break;
2919 	default:
2920 		DHD_ERROR(("REVISION SPECIFIC feature is not required\n"));
2921 		return res;
2922 	}
2923 
2924 	return res;
2925 }
2926 #endif /* SUPPORT_MULTIPLE_REVISION */
2927 
2928 uint16
2929 dhd_get_chipid(dhd_pub_t *dhd)
2930 {
2931 	dhd_bus_t *bus = dhd->bus;
2932 
2933 	if (bus && bus->sih)
2934 		return (uint16)si_chipid(bus->sih);
2935 	else
2936 		return 0;
2937 }
2938 
2939 /**
2940  * Loads firmware given by caller supplied path and nvram image into PCIe dongle.
2941  *
2942  * BCM_REQUEST_FW specific :
2943  * Given the chip type, determines the file paths to use within /lib/firmware/brcm/ containing the
2944  * firmware and nvm for that chip. If the download fails, retries the download with a different nvm file.
2945  *
2946  * BCMEMBEDIMAGE specific:
2947  * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
2948  * file will be used instead.
2949  *
2950  * @return BCME_OK on success
2951  */
2952 int
2953 dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
2954                           char *pfw_path, char *pnv_path)
2955 {
2956 	int ret;
2957 
2958 	bus->fw_path = pfw_path;
2959 	bus->nv_path = pnv_path;
2960 
2961 #if defined(SUPPORT_MULTIPLE_REVISION)
2962 	if (concate_revision(bus, bus->fw_path, bus->nv_path) != 0) {
2963 		DHD_ERROR(("%s: failed to concatenate revision\n",
2964 			__FUNCTION__));
2965 		return BCME_BADARG;
2966 	}
2967 #endif /* SUPPORT_MULTIPLE_REVISION */
2968 
2969 #if defined(DHD_BLOB_EXISTENCE_CHECK)
2970 	dhd_set_blob_support(bus->dhd, bus->fw_path);
2971 #endif /* DHD_BLOB_EXISTENCE_CHECK */
2972 
2973 	DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
2974 		__FUNCTION__, bus->fw_path, bus->nv_path));
2975 	dhdpcie_dump_resource(bus);
2976 
2977 	ret = dhdpcie_download_firmware(bus, osh);
2978 
2979 	return ret;
2980 }
2981 
2982 /**
2983  * Loads firmware given by 'bus->fw_path' into PCIe dongle.
2984  *
2985  * BCM_REQUEST_FW specific :
2986  * Given the chip type, determines the file paths to use within /lib/firmware/brcm/ containing the
2987  * firmware and nvm for that chip. If the download fails, retries the download with a different nvm file.
2988  *
2989  * BCMEMBEDIMAGE specific:
2990  * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
2991  * file will be used instead.
2992  *
2993  * @return BCME_OK on success
2994  */
2995 static int
2996 dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
2997 {
2998 	int ret = 0;
2999 #if defined(BCM_REQUEST_FW)
3000 	uint chipid = bus->sih->chip;
3001 	uint revid = bus->sih->chiprev;
3002 	char fw_path[64] = "/lib/firmware/brcm/bcm";	/* path to firmware image */
3003 	char nv_path[64];		/* path to nvram vars file */
3004 	bus->fw_path = fw_path;
3005 	bus->nv_path = nv_path;
3006 	switch (chipid) {
3007 	case BCM43570_CHIP_ID:
3008 		bcmstrncat(fw_path, "43570", 5);
3009 		switch (revid) {
3010 		case 0:
3011 			bcmstrncat(fw_path, "a0", 2);
3012 			break;
3013 		case 2:
3014 			bcmstrncat(fw_path, "a2", 2);
3015 			break;
3016 		default:
3017 			DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__,
3018 			revid));
3019 			break;
3020 		}
3021 		break;
3022 	default:
3023 		DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__,
3024 		chipid));
3025 		return 0;
3026 	}
3027 	/* load board specific nvram file */
3028 	snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path);
3029 	/* load firmware */
3030 	snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path);
3031 #endif /* BCM_REQUEST_FW */
3032 
3033 	DHD_OS_WAKE_LOCK(bus->dhd);
3034 	ret = _dhdpcie_download_firmware(bus);
3035 
3036 	DHD_OS_WAKE_UNLOCK(bus->dhd);
3037 	return ret;
3038 } /* dhdpcie_download_firmware */
3039 
3040 #define DHD_MEMORY_SET_PATTERN 0xAA
3041 
3042 /**
3043  * Downloads a file containing firmware into dongle memory. In case of a .bea file, the DHD
3044  * is updated with the event logging partitions within that file as well.
3045  *
3046  * @param pfw_path    Path to .bin or .bea file
3047  */
3048 static int
3049 dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
3050 {
3051 	int bcmerror = BCME_ERROR;
3052 	int offset = 0;
3053 	int len = 0;
3054 	bool store_reset;
3055 	char *imgbuf = NULL;
3056 	uint8 *memblock = NULL, *memptr = NULL;
3057 	int offset_end = bus->ramsize;
3058 	uint32 file_size = 0, read_len = 0;
3059 	struct trx_header *trx_hdr;
3060 	bool trx_chk = TRUE;
3061 
3062 #if defined(DHD_FW_MEM_CORRUPTION)
3063 	if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
3064 		dhd_tcm_test_enable = TRUE;
3065 	} else {
3066 		dhd_tcm_test_enable = FALSE;
3067 	}
3068 #endif /* DHD_FW_MEM_CORRUPTION */
3069 	DHD_ERROR(("%s: dhd_tcm_test_enable %u\n", __FUNCTION__, dhd_tcm_test_enable));
3070 	/* TCM check */
3071 	if (dhd_tcm_test_enable && !dhd_bus_tcm_test(bus)) {
3072 		DHD_ERROR(("dhd_bus_tcm_test failed\n"));
3073 		bcmerror = BCME_ERROR;
3074 		goto err;
3075 	}
3076 	DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
3077 
3078 	/* Should succeed in opening image if it is actually given through registry
3079 	 * entry or in module param.
3080 	 */
3081 	imgbuf = dhd_os_open_image1(bus->dhd, pfw_path);
3082 	if (imgbuf == NULL) {
3083 		goto err;
3084 	}
3085 
3086 	file_size = dhd_os_get_image_size(imgbuf);
3087 	if (!file_size) {
3088 		DHD_ERROR(("%s: failed to get the file size!\n", __FUNCTION__));
3089 		goto err;
3090 	}
3091 
3092 	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
3093 	if (memblock == NULL) {
3094 		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
3095 		bcmerror = BCME_NOMEM;
3096 		goto err;
3097 	}
3098 	if ((uint32)(uintptr)memblock % DHD_SDALIGN) {
3099 		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
3100 	}
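	/* memptr is now aligned to DHD_SDALIGN inside the over-allocated block */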
3101 
3102 	/* check if CR4/CA7 */
3103 	store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
3104 			si_setcore(bus->sih, ARMCA7_CORE_ID, 0));
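	/* For CR4/CA7 the image is linked at dongle_ram_base and its first word
	 * is the reset vector; it is saved in bus->resetinstr below so it can be
	 * programmed when the ARM is later released from reset.
	 */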
3105 	/* Download image with MEMBLOCK size */
3106 	while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) {
3107 		if (len < 0) {
3108 			DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
3109 			bcmerror = BCME_ERROR;
3110 			goto err;
3111 		}
3112 		read_len += len;
3113 		if (read_len > file_size) {
3114 			DHD_ERROR(("%s: WARNING! reading beyond EOF, len=%d; read_len=%u;"
3115 				" file_size=%u truncating len to %d \n", __FUNCTION__,
3116 				len, read_len, file_size, (len - (read_len - file_size))));
3117 			len -= (read_len - file_size);
3118 		}
3119 
3120 		/* if address is 0, store the reset instruction to be written in 0 */
3121 		if (store_reset) {
3122 			ASSERT(offset == 0);
3123 			bus->resetinstr = *(((uint32*)memptr));
3124 			/* Add start of RAM address to the address given by user */
3125 			offset += bus->dongle_ram_base;
3126 			offset_end += offset;
3127 			store_reset = FALSE;
3128 		}
3129 
3130 		/* Check for trx file */
3131 		if (trx_chk && (len >= sizeof(struct trx_header))) {
3132 			trx_chk = FALSE;
3133 			trx_hdr = (struct trx_header *)memptr;
3134 			if (trx_hdr->magic == TRX_MAGIC) {
3135 				/* CYW55560, we need to write TRX header at RAMSTART */
3136 				offset -= sizeof(struct trx_header);
3137 			}
3138 		}
3139 
3140 		bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
3141 		if (bcmerror) {
3142 			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
3143 				__FUNCTION__, bcmerror, MEMBLOCK, offset));
3144 			goto err;
3145 		}
3146 		offset += MEMBLOCK;
3147 
3148 		if (offset >= offset_end) {
3149 			DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
3150 				__FUNCTION__, offset, offset_end));
3151 			bcmerror = BCME_ERROR;
3152 			goto err;
3153 		}
3154 
3155 		if (read_len >= file_size) {
3156 			break;
3157 		}
3158 	}
3159 err:
3160 	if (memblock) {
3161 		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
3162 	}
3163 
3164 	if (imgbuf) {
3165 		dhd_os_close_image1(bus->dhd, imgbuf);
3166 	}
3167 
3168 	return bcmerror;
3169 } /* dhdpcie_download_code_file */
3170 
3171 #ifdef CUSTOMER_HW4_DEBUG
3172 #define MIN_NVRAMVARS_SIZE 128
3173 #endif /* CUSTOMER_HW4_DEBUG */
3174 
3175 static int
3176 dhdpcie_download_nvram(struct dhd_bus *bus)
3177 {
3178 	int bcmerror = BCME_ERROR;
3179 	uint len;
3180 	char * memblock = NULL;
3181 	char *bufp;
3182 	char *pnv_path;
3183 	bool nvram_file_exists;
3184 	bool nvram_uefi_exists = FALSE;
3185 	bool local_alloc = FALSE;
3186 	pnv_path = bus->nv_path;
3187 
3188 	nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
3189 
3190 	/* First try UEFI */
3191 	len = MAX_NVRAMBUF_SIZE;
3192 	dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, (int *)&len);
3193 
3194 	/* If UEFI empty, then read from file system */
3195 	if ((len <= 0) || (memblock == NULL)) {
3196 
3197 		if (nvram_file_exists) {
3198 			len = MAX_NVRAMBUF_SIZE;
3199 			dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, (int *)&len);
3200 			if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) {
3201 				goto err;
3202 			}
3203 		}
3204 		else {
3205 			/* For SROM OTP no external file or UEFI required */
3206 			bcmerror = BCME_OK;
3207 		}
3208 	} else {
3209 		nvram_uefi_exists = TRUE;
3210 	}
3211 
3212 	DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len));
3213 
3214 	if (len > 0 && len <= MAX_NVRAMBUF_SIZE && memblock != NULL) {
3215 		bufp = (char *) memblock;
3216 
3217 		{
3218 			bufp[len] = 0;
3219 			if (nvram_uefi_exists || nvram_file_exists) {
3220 				len = process_nvram_vars(bufp, len);
3221 			}
3222 		}
3223 
3224 		DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len));
3225 #ifdef CUSTOMER_HW4_DEBUG
3226 		if (len < MIN_NVRAMVARS_SIZE) {
3227 			DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n",
3228 				__FUNCTION__));
3229 			bcmerror = BCME_ERROR;
3230 			goto err;
3231 		}
3232 #endif /* CUSTOMER_HW4_DEBUG */
3233 
3234 		if (len % 4) {
3235 			len += 4 - (len % 4);
3236 		}
3237 		bufp += len;
3238 		*bufp++ = 0;
3239 		if (len)
3240 			bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
3241 		if (bcmerror) {
3242 			DHD_ERROR(("%s: error downloading vars: %d\n",
3243 				__FUNCTION__, bcmerror));
3244 		}
3245 	}
3246 
3247 err:
3248 	if (memblock) {
3249 		if (local_alloc) {
3250 			MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
3251 		} else {
3252 			dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE);
3253 		}
3254 	}
3255 
3256 	return bcmerror;
3257 }
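
/*
 * Illustrative sketch (not part of the driver flow): before download, the
 * processed NVRAM block above is rounded up to a 4-byte boundary and then
 * NUL-terminated, so the dongle always receives a word-aligned, terminated
 * variable block. A minimal restatement with a hypothetical helper name:
 */
#if 0	/* example only, compiled out */
static uint
example_pad_to_word(uint len)
{
	if (len % 4)
		len += 4 - (len % 4);	/* round up to the next multiple of 4 */
	return len;
}
#endif	/* example */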
3258 
3259 static int
3260 dhdpcie_ramsize_read_image(struct dhd_bus *bus, char *buf, int len)
3261 {
3262 	int bcmerror = BCME_ERROR;
3263 	char *imgbuf = NULL;
3264 
3265 	if (buf == NULL || len == 0)
3266 		goto err;
3267 
3268 	/* External image takes precedence if specified */
3269 	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
3270 		// opens and seeks to correct file offset:
3271 		imgbuf = dhd_os_open_image1(bus->dhd, bus->fw_path);
3272 		if (imgbuf == NULL) {
3273 			DHD_ERROR(("%s: Failed to open firmware file\n", __FUNCTION__));
3274 			goto err;
3275 		}
3276 
3277 		/* Read it */
3278 		if (len != dhd_os_get_image_block(buf, len, imgbuf)) {
3279 			DHD_ERROR(("%s: Failed to read %d bytes data\n", __FUNCTION__, len));
3280 			goto err;
3281 		}
3282 
3283 		bcmerror = BCME_OK;
3284 	}
3285 
3286 err:
3287 	if (imgbuf)
3288 		dhd_os_close_image1(bus->dhd, imgbuf);
3289 
3290 	return bcmerror;
3291 }
3292 
3293 /* The ramsize can be changed in the dongle image; for example, the 4365 chip shares sysmem
3294  * with the BMC, and how much sysmem belongs to the CA7 can be adjusted at dongle compile time.
3295  * So the DHD needs to detect this case and update the dongle RAMSIZE accordingly.
3296  */
3297 static void
3298 dhdpcie_ramsize_adj(struct dhd_bus *bus)
3299 {
3300 	int i, search_len = 0;
3301 	uint8 *memptr = NULL;
3302 	uint8 *ramsizeptr = NULL;
3303 	uint ramsizelen;
3304 	uint32 ramsize_ptr_ptr[] = {RAMSIZE_PTR_PTR_LIST};
3305 	hnd_ramsize_ptr_t ramsize_info;
3306 
3307 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3308 
3309 	/* Bail out if the dongle RAMSIZE adjustment was already done. */
3310 	if (bus->ramsize_adjusted) {
3311 		return;
3312 	}
3313 
3314 	/* Success or failure, we don't want to be here
3315 	 * more than once.
3316 	 */
3317 	bus->ramsize_adjusted = TRUE;
3318 
3319 	/* Don't adjust if the user has restricted the dongle RAM size */
3320 	if (dhd_dongle_memsize) {
3321 		DHD_ERROR(("%s: user restricted dongle ram size to %d.\n", __FUNCTION__,
3322 			dhd_dongle_memsize));
3323 		return;
3324 	}
3325 
3326 	/* Bail out immediately if there is no image to download */
3327 	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
3328 		DHD_ERROR(("%s: no firmware file\n", __FUNCTION__));
3329 		return;
3330 	}
3331 
3332 	/* Get maximum RAMSIZE info search length */
3333 	for (i = 0; ; i++) {
3334 		if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END)
3335 			break;
3336 
3337 		if (search_len < (int)ramsize_ptr_ptr[i])
3338 			search_len = (int)ramsize_ptr_ptr[i];
3339 	}
3340 
3341 	if (!search_len)
3342 		return;
3343 
3344 	search_len += sizeof(hnd_ramsize_ptr_t);
3345 
3346 	memptr = MALLOC(bus->dhd->osh, search_len);
3347 	if (memptr == NULL) {
3348 		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, search_len));
3349 		return;
3350 	}
3351 
3352 	/* External image takes precedence if specified */
3353 	if (dhdpcie_ramsize_read_image(bus, (char *)memptr, search_len) != BCME_OK) {
3354 		goto err;
3355 	}
3356 	else {
3357 		ramsizeptr = memptr;
3358 		ramsizelen = search_len;
3359 	}
3360 
3361 	if (ramsizeptr) {
3362 		/* Check Magic */
3363 		for (i = 0; ; i++) {
3364 			if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END)
3365 				break;
3366 
3367 			if (ramsize_ptr_ptr[i] + sizeof(hnd_ramsize_ptr_t) > ramsizelen)
3368 				continue;
3369 
3370 			memcpy((char *)&ramsize_info, ramsizeptr + ramsize_ptr_ptr[i],
3371 				sizeof(hnd_ramsize_ptr_t));
3372 
3373 			if (ramsize_info.magic == HTOL32(HND_RAMSIZE_PTR_MAGIC)) {
3374 				bus->orig_ramsize = LTOH32(ramsize_info.ram_size);
3375 				bus->ramsize = LTOH32(ramsize_info.ram_size);
3376 				DHD_ERROR(("%s: Adjust dongle RAMSIZE to 0x%x\n", __FUNCTION__,
3377 					bus->ramsize));
3378 				break;
3379 			}
3380 		}
3381 	}
3382 
3383 err:
3384 	if (memptr)
3385 		MFREE(bus->dhd->osh, memptr, search_len);
3386 
3387 	return;
3388 } /* dhdpcie_ramsize_adj */
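
/*
 * Illustrative sketch (not part of the driver flow): the adjustment above
 * probes a fixed list of candidate offsets in the image head for a
 * hnd_ramsize_ptr_t record stamped with HND_RAMSIZE_PTR_MAGIC. A minimal
 * restatement of that probe over a generic buffer, with a hypothetical
 * helper name:
 */
#if 0	/* example only, compiled out */
static bool
example_find_ramsize_rec(const uint8 *buf, uint buflen,
	const uint32 *offsets, hnd_ramsize_ptr_t *out)
{
	int i;
	for (i = 0; offsets[i] != RAMSIZE_PTR_PTR_END; i++) {
		if (offsets[i] + sizeof(*out) > buflen)
			continue;	/* candidate lies beyond what was read */
		memcpy(out, buf + offsets[i], sizeof(*out));
		if (out->magic == HTOL32(HND_RAMSIZE_PTR_MAGIC))
			return TRUE;	/* ram_size field is little-endian */
	}
	return FALSE;
}
#endif	/* example */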
3389 
3390 /**
3391  * Downloads firmware file given by 'bus->fw_path' into PCIe dongle
3392  *
3393  * BCMEMBEDIMAGE specific:
3394  * If bus->fw_path is empty, or if the download of bus->fw_path failed, firmware contained in header
3395  * file will be used instead.
3396  *
3397  */
3398 static int
3399 _dhdpcie_download_firmware(struct dhd_bus *bus)
3400 {
3401 	int bcmerror = -1;
3402 
3403 	bool embed = FALSE;	/* download embedded firmware */
3404 	bool dlok = FALSE;	/* download firmware succeeded */
3405 
3406 	/* Bail out immediately if there is no image to download */
3407 	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
3408 		DHD_ERROR(("%s: no firmware file\n", __FUNCTION__));
3409 		return 0;
3410 	}
3411 	/* Adjust ram size */
3412 	dhdpcie_ramsize_adj(bus);
3413 
3414 	/* Keep arm in reset */
3415 	if (dhdpcie_bus_download_state(bus, TRUE)) {
3416 		DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
3417 		goto err;
3418 	}
3419 
3420 	/* External image takes precedence if specified */
3421 	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
3422 		if (dhdpcie_download_code_file(bus, bus->fw_path)) {
3423 			DHD_ERROR(("%s:%d dongle image file download failed\n", __FUNCTION__,
3424 				__LINE__));
3425 			goto err;
3426 		} else {
3427 			embed = FALSE;
3428 			dlok = TRUE;
3429 		}
3430 	}
3431 
3432 	BCM_REFERENCE(embed);
3433 	if (!dlok) {
3434 		DHD_ERROR(("%s:%d dongle image download failed\n", __FUNCTION__, __LINE__));
3435 		goto err;
3436 	}
3437 
3438 	/* EXAMPLE: nvram_array */
3439 	/* If a valid nvram_array is specified as above, it can be passed down to the dongle */
3440 	/* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
3441 
3442 	/* External nvram takes precedence if specified */
3443 	if (dhdpcie_download_nvram(bus)) {
3444 		DHD_ERROR(("%s:%d dongle nvram file download failed\n", __FUNCTION__, __LINE__));
3445 		goto err;
3446 	}
3447 
3448 	/* Take arm out of reset */
3449 	if (dhdpcie_bus_download_state(bus, FALSE)) {
3450 		DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
3451 		goto err;
3452 	}
3453 
3454 	bcmerror = 0;
3455 
3456 err:
3457 	return bcmerror;
3458 } /* _dhdpcie_download_firmware */
3459 
3460 static int
3461 dhdpcie_bus_readconsole(dhd_bus_t *bus)
3462 {
3463 	dhd_console_t *c = &bus->console;
3464 	uint8 line[CONSOLE_LINE_MAX], ch;
3465 	uint32 n, idx, addr;
3466 	int rv;
3467 	uint readlen = 0;
3468 	uint i = 0;
3469 
3470 	if (!DHD_FWLOG_ON())
3471 		return 0;
3472 
3473 	/* Don't do anything until FWREADY updates console address */
3474 	if (bus->console_addr == 0)
3475 		return -1;
3476 
3477 	/* Read console log struct */
3478 	addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
3479 
3480 	/* Check if console log struct addr has changed */
3481 	/* Save the address(Local copy) */
3482 	if (c->log_addr != addr) {
3483 		/* Reset last index pointer */
3484 		c->last = 0;
3485 		/* Re-allocate memory if console address changes */
3486 		if (c->buf) {
3487 			MFREE(bus->dhd->osh, c->buf, c->bufsize);
3488 			c->buf = NULL;
3489 		}
3490 		/* Save new console address */
3491 		c->log_addr = addr;
3492 	}
3493 
3494 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
3495 		return rv;
3496 
3497 	/* Allocate console buffer (one time only) */
3498 	if (c->buf == NULL) {
3499 		c->bufsize = ltoh32(c->log.buf_size);
3500 		if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
3501 			return BCME_NOMEM;
3502 		DHD_ERROR(("conlog: bufsize=0x%x\n", c->bufsize));
3503 	}
3504 	idx = ltoh32(c->log.idx);
3505 
3506 	/* Protect against corrupt value */
3507 	if (idx > c->bufsize)
3508 		return BCME_ERROR;
3509 
3510 	/* Skip reading the console buffer if the index pointer has not moved */
3511 	if (idx == c->last)
3512 		return BCME_OK;
3513 
3514 	DHD_ERROR(("conlog: addr=0x%x, idx=0x%x, last=0x%x \n", c->log.buf,
3515 	   idx, c->last));
3516 
3517 	/* Read the console buffer data to a local buffer */
3518 	/* Optimize by reading only the portion of the buffer needed, taking
3519 	 * care to handle wrap-around.
3520 	 */
3521 	addr = ltoh32(c->log.buf);
3522 
3523 	/* wrap around case - write ptr < read ptr */
3524 	if (idx < c->last) {
3525 		/* from read ptr to end of buffer */
3526 		readlen = c->bufsize - c->last;
3527 		if ((rv = dhdpcie_bus_membytes(bus, FALSE,
3528 				addr + c->last, c->buf, readlen)) < 0) {
3529 			DHD_ERROR(("conlog: read error[1] ! \n"));
3530 			return rv;
3531 		}
3532 		/* from beginning of buffer to write ptr */
3533 		if ((rv = dhdpcie_bus_membytes(bus, FALSE,
3534 				addr, c->buf + readlen,
3535 				idx)) < 0) {
3536 			DHD_ERROR(("conlog: read error[2] ! \n"));
3537 			return rv;
3538 		}
3539 		readlen += idx;
3540 	} else {
3541 		/* non-wraparound case, write ptr > read ptr */
3542 		readlen = (uint)idx - c->last;
3543 		if ((rv = dhdpcie_bus_membytes(bus, FALSE,
3544 				addr + c->last, c->buf, readlen)) < 0) {
3545 			DHD_ERROR(("conlog: read error[3] ! \n"));
3546 			return rv;
3547 		}
3548 	}
3549 	/* update read ptr */
3550 	c->last = idx;
3551 
3552 	/* now output the read data from the local buffer to the host console */
3553 	while (i < readlen) {
3554 		for (n = 0; n < CONSOLE_LINE_MAX - 2 && i < readlen; n++) {
3555 			ch = c->buf[i];
3556 			++i;
3557 			if (ch == '\n')
3558 				break;
3559 			line[n] = ch;
3560 		}
3561 
3562 		if (n > 0) {
3563 			if (line[n - 1] == '\r')
3564 				n--;
3565 			line[n] = 0;
3566 			DHD_FWLOG(("CONSOLE: %s\n", line));
3567 		}
3568 	}
3569 
3570 	return BCME_OK;
3571 
3572 } /* dhdpcie_bus_readconsole */
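
/*
 * Illustrative sketch (not part of the driver flow): the console log is a
 * ring buffer, so the read above from 'last' (host read pointer) up to
 * 'idx' (dongle write pointer) splits into two spans when the write
 * pointer has wrapped. A minimal restatement with a hypothetical helper:
 */
#if 0	/* example only, compiled out */
static void
example_ring_spans(uint bufsize, uint last, uint idx, uint *len1, uint *len2)
{
	if (idx < last) {		/* wrapped: tail of buffer, then head */
		*len1 = bufsize - last;	/* from 'last' to end of buffer */
		*len2 = idx;		/* from start of buffer to 'idx' */
	} else {			/* no wrap: one contiguous span */
		*len1 = idx - last;
		*len2 = 0;
	}
}
#endif	/* example */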
3573 
3574 void
3575 dhd_bus_dump_console_buffer(dhd_bus_t *bus)
3576 {
3577 	uint32 n, i;
3578 	uint32 addr;
3579 	char *console_buffer = NULL;
3580 	uint32 console_ptr, console_size, console_index;
3581 	uint8 line[CONSOLE_LINE_MAX], ch;
3582 	int rv;
3583 
3584 	DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__));
3585 
3586 	if (bus->is_linkdown) {
3587 		DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n", __FUNCTION__));
3588 		return;
3589 	}
3590 
3591 	addr =	bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
3592 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
3593 		(uint8 *)&console_ptr, sizeof(console_ptr))) < 0) {
3594 		goto exit;
3595 	}
3596 
3597 	addr =	bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
3598 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
3599 		(uint8 *)&console_size, sizeof(console_size))) < 0) {
3600 		goto exit;
3601 	}
3602 
3603 	addr =	bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
3604 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
3605 		(uint8 *)&console_index, sizeof(console_index))) < 0) {
3606 		goto exit;
3607 	}
3608 
3609 	console_ptr = ltoh32(console_ptr);
3610 	console_size = ltoh32(console_size);
3611 	console_index = ltoh32(console_index);
3612 
3613 	if (console_size > CONSOLE_BUFFER_MAX ||
3614 		!(console_buffer = MALLOC(bus->dhd->osh, console_size))) {
3615 		goto exit;
3616 	}
3617 
3618 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
3619 		(uint8 *)console_buffer, console_size)) < 0) {
3620 		goto exit;
3621 	}
3622 
3623 	for (i = 0, n = 0; i < console_size; i += n + 1) {
3624 		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
3625 			ch = console_buffer[(console_index + i + n) % console_size];
3626 			if (ch == '\n')
3627 				break;
3628 			line[n] = ch;
3629 		}
3630 
3631 		if (n > 0) {
3632 			if (line[n - 1] == '\r')
3633 				n--;
3634 			line[n] = 0;
3635 			/* Don't use DHD_ERROR macro since we print
3636 			 * a lot of information quickly. The macro
3637 			 * will truncate a lot of the printfs
3638 			 */
3639 
3640 			DHD_FWLOG(("CONSOLE: %s\n", line));
3641 		}
3642 	}
3643 
3644 exit:
3645 	if (console_buffer)
3646 		MFREE(bus->dhd->osh, console_buffer, console_size);
3647 	return;
3648 }
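
/*
 * Illustrative sketch (not part of the driver flow): the dump above walks
 * the entire ring starting at the dongle's write index, using modulo
 * indexing so the oldest byte is emitted first. A minimal restatement with
 * a hypothetical helper name:
 */
#if 0	/* example only, compiled out */
static char
example_ring_at(const char *ring, uint32 size, uint32 widx, uint32 pos)
{
	/* byte at chronological position 'pos', oldest first */
	return ring[(widx + pos) % size];
}
#endif	/* example */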
3649 
3650 /**
3651  * Checks whether the dongle has trapped or asserted; if so, collects and logs the trap/assert info.
3652  *
3653  * @return BCME_OK on success
3654  */
3655 static int
3656 dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
3657 {
3658 	int bcmerror = 0;
3659 	uint msize = 512;
3660 	char *mbuffer = NULL;
3661 	uint maxstrlen = 256;
3662 	char *str = NULL;
3663 	pciedev_shared_t *local_pciedev_shared = bus->pcie_sh;
3664 	struct bcmstrbuf strbuf;
3665 	unsigned long flags;
3666 	bool dongle_trap_occured = FALSE;
3667 
3668 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3669 
3670 	if (DHD_NOCHECKDIED_ON()) {
3671 		return 0;
3672 	}
3673 
3674 	if (data == NULL) {
3675 		/*
3676 		 * Called after an rx ctrl timeout, in which case "data" is NULL;
3677 		 * allocate memory to trace the trap or assert.
3678 		 */
3679 		size = msize;
3680 		mbuffer = data = MALLOC(bus->dhd->osh, msize);
3681 
3682 		if (mbuffer == NULL) {
3683 			DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
3684 			bcmerror = BCME_NOMEM;
3685 			goto done2;
3686 		}
3687 	}
3688 
3689 	if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
3690 		DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
3691 		bcmerror = BCME_NOMEM;
3692 		goto done2;
3693 	}
3694 	DHD_GENERAL_LOCK(bus->dhd, flags);
3695 	DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd);
3696 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
3697 
3698 	if (MULTIBP_ENAB(bus->sih)) {
3699 		dhd_bus_pcie_pwr_req(bus);
3700 	}
3701 	if ((bcmerror = dhdpcie_readshared(bus)) < 0) {
3702 		goto done1;
3703 	}
3704 
3705 	bcm_binit(&strbuf, data, size);
3706 
3707 	bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address  : 0x%08X\n",
3708 	            local_pciedev_shared->msgtrace_addr, local_pciedev_shared->console_addr);
3709 
3710 	if ((local_pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
3711 		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
3712 		 * (Avoids conflict with real asserts for programmatic parsing of output.)
3713 		 */
3714 		bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
3715 	}
3716 
3717 	if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) {
3718 		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
3719 		 * (Avoids conflict with real asserts for programmatic parsing of output.)
3720 		 */
3721 		bcm_bprintf(&strbuf, "No trap%s in dongle",
3722 		          (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT)
3723 		          ?"/assrt" :"");
3724 	} else {
3725 		if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) {
3726 			/* Download assert */
3727 			bcm_bprintf(&strbuf, "Dongle assert");
3728 			if (bus->pcie_sh->assert_exp_addr != 0) {
3729 				str[0] = '\0';
3730 				if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
3731 					bus->pcie_sh->assert_exp_addr,
3732 					(uint8 *)str, maxstrlen)) < 0) {
3733 					goto done1;
3734 				}
3735 
3736 				str[maxstrlen - 1] = '\0';
3737 				bcm_bprintf(&strbuf, " expr \"%s\"", str);
3738 			}
3739 
3740 			if (bus->pcie_sh->assert_file_addr != 0) {
3741 				str[0] = '\0';
3742 				if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
3743 					bus->pcie_sh->assert_file_addr,
3744 					(uint8 *)str, maxstrlen)) < 0) {
3745 					goto done1;
3746 				}
3747 
3748 				str[maxstrlen - 1] = '\0';
3749 				bcm_bprintf(&strbuf, " file \"%s\"", str);
3750 			}
3751 
3752 			bcm_bprintf(&strbuf, " line %d ",  bus->pcie_sh->assert_line);
3753 		}
3754 
3755 		if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
3756 			trap_t *tr = &bus->dhd->last_trap_info;
3757 			dongle_trap_occured = TRUE;
3758 			if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
3759 				bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) {
3760 				bus->dhd->dongle_trap_occured = TRUE;
3761 				goto done1;
3762 			}
3763 			dhd_bus_dump_trap_info(bus, &strbuf);
3764 		}
3765 	}
3766 
3767 	if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
3768 		DHD_FWLOG(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
3769 
3770 		dhd_bus_dump_console_buffer(bus);
3771 		dhd_prot_debug_info_print(bus->dhd);
3772 
3773 #if defined(DHD_FW_COREDUMP)
3774 		/* save core dump or write to a file */
3775 		if (bus->dhd->memdump_enabled) {
3776 #ifdef DHD_SSSR_DUMP
3777 			bus->dhd->collect_sssr = TRUE;
3778 #endif /* DHD_SSSR_DUMP */
3779 			bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
3780 			dhdpcie_mem_dump(bus);
3781 		}
3782 #endif /* DHD_FW_COREDUMP */
3783 
3784 		/* Set the trap occurred flag only after all the memdump,
3785 		* logdump and sssr dump collection has been scheduled.
3786 		*/
3787 		if (dongle_trap_occured) {
3788 			bus->dhd->dongle_trap_occured = TRUE;
3789 		}
3790 
3791 		/* wake up IOCTL wait event */
3792 		dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
3793 
3794 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
3795 		copy_hang_info_trap(bus->dhd);
3796 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
3797 		dhd_schedule_reset(bus->dhd);
3798 
3799 	}
3800 
3801 done1:
3802 	if (MULTIBP_ENAB(bus->sih)) {
3803 		dhd_bus_pcie_pwr_req_clear(bus);
3804 	}
3805 
3806 	DHD_GENERAL_LOCK(bus->dhd, flags);
3807 	DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd);
3808 	dhd_os_busbusy_wake(bus->dhd);
3809 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
3810 done2:
3811 	if (mbuffer)
3812 		MFREE(bus->dhd->osh, mbuffer, msize);
3813 	if (str)
3814 		MFREE(bus->dhd->osh, str, maxstrlen);
3815 
3816 	return bcmerror;
3817 } /* dhdpcie_checkdied */
3818 
3819 /* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
3820 void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf)
3821 {
3822 	int ret = 0;
3823 	int size; /* Full mem size */
3824 	int start; /* Start address */
3825 	int read_size = 0; /* Read size of each iteration */
3826 	uint8 *databuf = buf;
3827 
3828 	if (bus == NULL) {
3829 		return;
3830 	}
3831 
3832 	start = bus->dongle_ram_base;
3833 	read_size = 4;
3834 	/* check for dead bus */
3835 	{
3836 		uint test_word = 0;
3837 		ret = dhdpcie_bus_membytes(bus, FALSE, start, (uint8*)&test_word, read_size);
3838 		/* if read error or bus timeout */
3839 		if (ret || (test_word == 0xFFFFFFFF)) {
3840 			return;
3841 		}
3842 	}
3843 
3844 	/* Get full mem size */
3845 	size = bus->ramsize;
3846 	/* Read mem content */
3847 	while (size)
3848 	{
3849 		read_size = MIN(MEMBLOCK, size);
3850 		if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) {
3851 			return;
3852 		}
3853 
3854 		/* Decrement size and increment start address */
3855 		size -= read_size;
3856 		start += read_size;
3857 		databuf += read_size;
3858 	}
3859 	bus->dhd->soc_ram = buf;
3860 	bus->dhd->soc_ram_length = bus->ramsize;
3861 	return;
3862 }
3863 
3864 #if defined(DHD_FW_COREDUMP)
3865 static int
3866 dhdpcie_get_mem_dump(dhd_bus_t *bus)
3867 {
3868 	int ret = BCME_OK;
3869 	int size = 0;
3870 	int start = 0;
3871 	int read_size = 0; /* Read size of each iteration */
3872 	uint8 *p_buf = NULL, *databuf = NULL;
3873 
3874 	if (!bus) {
3875 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
3876 		return BCME_ERROR;
3877 	}
3878 
3879 	if (!bus->dhd) {
3880 		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
3881 		return BCME_ERROR;
3882 	}
3883 
3884 	size = bus->ramsize; /* Full mem size */
3885 	start = bus->dongle_ram_base; /* Start address */
3886 
3887 	/* Get full mem size */
3888 	p_buf = dhd_get_fwdump_buf(bus->dhd, size);
3889 	if (!p_buf) {
3890 		DHD_ERROR(("%s: Out of memory (%d bytes)\n",
3891 			__FUNCTION__, size));
3892 		return BCME_ERROR;
3893 	}
3894 
3895 	/* Read mem content */
3896 	DHD_TRACE_HW4(("Dump dongle memory\n"));
3897 	databuf = p_buf;
3898 	while (size > 0) {
3899 		read_size = MIN(MEMBLOCK, size);
3900 		ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size);
3901 		if (ret) {
3902 			DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
3903 #ifdef DHD_DEBUG_UART
3904 			bus->dhd->memdump_success = FALSE;
3905 #endif	/* DHD_DEBUG_UART */
3906 			break;
3907 		}
3908 		DHD_TRACE(("."));
3909 
3910 		/* Decrement size and increment start address */
3911 		size -= read_size;
3912 		start += read_size;
3913 		databuf += read_size;
3914 	}
3915 
3916 	return ret;
3917 }
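
/*
 * Illustrative sketch (not part of the driver flow): both dump paths walk
 * dongle RAM in MEMBLOCK-sized strides. A minimal restatement of that
 * chunking loop, with a hypothetical read callback standing in for
 * dhdpcie_bus_membytes():
 */
#if 0	/* example only, compiled out */
typedef int (*example_read_fn)(int addr, uint8 *dst, int nbytes);

static int
example_dump_ram(int start, int size, uint8 *dst, example_read_fn rd)
{
	while (size > 0) {
		int chunk = MIN(MEMBLOCK, size);
		int ret = rd(start, dst, chunk);
		if (ret)
			return ret;	/* propagate backplane read error */
		start += chunk;
		dst += chunk;
		size -= chunk;
	}
	return BCME_OK;
}
#endif	/* example */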
3918 
3919 static int
3920 dhdpcie_mem_dump(dhd_bus_t *bus)
3921 {
3922 	dhd_pub_t *dhdp;
3923 	int ret;
3924 
3925 #ifdef EXYNOS_PCIE_DEBUG
3926 	exynos_pcie_register_dump(1);
3927 #endif /* EXYNOS_PCIE_DEBUG */
3928 
3929 #ifdef SUPPORT_LINKDOWN_RECOVERY
3930 	if (bus->is_linkdown) {
3931 		DHD_ERROR(("%s: PCIe link is down so skip\n", __FUNCTION__));
3932 		/* panic only for DUMP_MEMFILE_BUGON */
3933 		ASSERT(bus->dhd->memdump_enabled != DUMP_MEMFILE_BUGON);
3934 		return BCME_ERROR;
3935 	}
3936 #endif /* SUPPORT_LINKDOWN_RECOVERY */
3937 
3938 	dhdp = bus->dhd;
3939 	if (!dhdp) {
3940 		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
3941 		return BCME_ERROR;
3942 	}
3943 
3944 	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
3945 		DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
3946 		return BCME_ERROR;
3947 	}
3948 
3949 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
3950 	if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) < 0)
3951 		return BCME_ERROR;
3952 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
3953 
3954 	ret = dhdpcie_get_mem_dump(bus);
3955 	if (ret) {
3956 		DHD_ERROR(("%s: failed to get mem dump, err=%d\n",
3957 			__FUNCTION__, ret));
3958 		return ret;
3959 	}
3960 
3961 	dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
3962 	/* buf (actually soc_ram) is freed in dhd_{free,clear} */
3963 
3964 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
3965 	pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
3966 	pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
3967 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
3968 
3969 	return ret;
3970 }
3971 
3972 int
3973 dhd_bus_get_mem_dump(dhd_pub_t *dhdp)
3974 {
3975 	if (!dhdp) {
3976 		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
3977 		return BCME_ERROR;
3978 	}
3979 
3980 	return dhdpcie_get_mem_dump(dhdp->bus);
3981 }
3982 
3983 int
3984 dhd_bus_mem_dump(dhd_pub_t *dhdp)
3985 {
3986 	dhd_bus_t *bus = dhdp->bus;
3987 	int ret = BCME_ERROR;
3988 
3989 	if (dhdp->busstate == DHD_BUS_DOWN) {
3990 		DHD_ERROR(("%s bus is down\n", __FUNCTION__));
3991 		return BCME_ERROR;
3992 	}
3993 
3994 	/* Try to resume if already suspended or suspend in progress */
3995 #ifdef DHD_PCIE_RUNTIMEPM
3996 	dhdpcie_runtime_bus_wake(dhdp, CAN_SLEEP(), __builtin_return_address(0));
3997 #endif /* DHD_PCIE_RUNTIMEPM */
3998 
3999 	/* Skip if still in suspended or suspend in progress */
4000 	if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhdp)) {
4001 		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
4002 			__FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
4003 		return BCME_ERROR;
4004 	}
4005 
4006 	DHD_OS_WAKE_LOCK(dhdp);
4007 	ret = dhdpcie_mem_dump(bus);
4008 	DHD_OS_WAKE_UNLOCK(dhdp);
4009 	return ret;
4010 }
4011 #endif	/* DHD_FW_COREDUMP */
4012 
4013 int
4014 dhd_socram_dump(dhd_bus_t *bus)
4015 {
4016 #if defined(DHD_FW_COREDUMP)
4017 	DHD_OS_WAKE_LOCK(bus->dhd);
4018 	dhd_bus_mem_dump(bus->dhd);
4019 	DHD_OS_WAKE_UNLOCK(bus->dhd);
4020 	return 0;
4021 #else
4022 	return -1;
4023 #endif // endif
4024 }
4025 
4026 /**
4027  * Transfers bytes from host to dongle using pio mode.
4028  * Parameter 'address' is a backplane address.
4029  */
4030 static int
4031 dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size)
4032 {
4033 	uint dsize;
4034 	int detect_endian_flag = 0x01;
4035 	bool little_endian;
4036 
4037 	if (write && bus->is_linkdown) {
4038 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
4039 		return BCME_ERROR;
4040 	}
4041 
4042 	if (MULTIBP_ENAB(bus->sih)) {
4043 		dhd_bus_pcie_pwr_req(bus);
4044 	}
4045 	/* Detect endianness. */
4046 	little_endian = *(char *)&detect_endian_flag;
4047 
4048 	/* In remap mode, adjust address beyond socram and redirect
4049 	 * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
4050 	 * is not backplane accessible
4051 	 */
4052 
4053 	/* Determine initial transfer parameters */
4054 #ifdef DHD_SUPPORT_64BIT
4055 	dsize = sizeof(uint64);
4056 #else /* !DHD_SUPPORT_64BIT */
4057 	dsize = sizeof(uint32);
4058 #endif /* DHD_SUPPORT_64BIT */
4059 
4060 	/* Do the transfer(s) */
4061 	if (write) {
4062 		while (size) {
4063 #ifdef DHD_SUPPORT_64BIT
4064 			if (size >= sizeof(uint64) && little_endian &&	!(address % 8)) {
4065 				dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
4066 			}
4067 #else /* !DHD_SUPPORT_64BIT */
4068 			if (size >= sizeof(uint32) && little_endian &&	!(address % 4)) {
4069 				dhdpcie_bus_wtcm32(bus, address, *((uint32*)data));
4070 			}
4071 #endif /* DHD_SUPPORT_64BIT */
4072 			else {
4073 				dsize = sizeof(uint8);
4074 				dhdpcie_bus_wtcm8(bus, address, *data);
4075 			}
4076 
4077 			/* Adjust for next transfer (if any) */
4078 			if ((size -= dsize)) {
4079 				data += dsize;
4080 				address += dsize;
4081 			}
4082 		}
4083 	} else {
4084 		while (size) {
4085 #ifdef DHD_SUPPORT_64BIT
4086 			if (size >= sizeof(uint64) && little_endian &&	!(address % 8))
4087 			{
4088 				*(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
4089 			}
4090 #else /* !DHD_SUPPORT_64BIT */
4091 			if (size >= sizeof(uint32) && little_endian &&	!(address % 4))
4092 			{
4093 				*(uint32 *)data = dhdpcie_bus_rtcm32(bus, address);
4094 			}
4095 #endif /* DHD_SUPPORT_64BIT */
4096 			else {
4097 				dsize = sizeof(uint8);
4098 				*data = dhdpcie_bus_rtcm8(bus, address);
4099 			}
4100 
4101 			/* Adjust for next transfer (if any) */
4102 			if ((size -= dsize) > 0) {
4103 				data += dsize;
4104 				address += dsize;
4105 			}
4106 		}
4107 	}
4108 	if (MULTIBP_ENAB(bus->sih)) {
4109 		dhd_bus_pcie_pwr_req_clear(bus);
4110 	}
4111 	return BCME_OK;
4112 } /* dhdpcie_bus_membytes */
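
/*
 * Illustrative sketch (not part of the driver flow): the endianness probe
 * above stores 0x01 in an int and reads back its first byte; on a
 * little-endian host the least significant byte is stored first, so the
 * char read yields 1. Standalone restatement with a hypothetical name:
 */
#if 0	/* example only, compiled out */
static bool
example_host_is_little_endian(void)
{
	int probe = 0x01;
	return *(char *)&probe;	/* 1 on little-endian, 0 on big-endian */
}
#endif	/* example */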
4113 
4114 /**
4115  * Transfers one transmit (ethernet) packet that was queued in the (flow controlled) flow ring queue
4116  * to the (non flow controlled) flow ring.
4117  */
4118 int BCMFASTPATH
4119 dhd_bus_schedule_queue(struct dhd_bus  *bus, uint16 flow_id, bool txs)
4120 {
4121 	flow_ring_node_t *flow_ring_node;
4122 	int ret = BCME_OK;
4123 #ifdef DHD_LOSSLESS_ROAMING
4124 	dhd_pub_t *dhdp = bus->dhd;
4125 #endif // endif
4126 	DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
4127 
4128 	/* Validate flow_id */
4129 	if (flow_id >= bus->max_submission_rings) {
4130 		DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__,
4131 			flow_id, bus->max_submission_rings));
4132 		return 0;
4133 	}
4134 
4135 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);
4136 
4137 	if (flow_ring_node->prot_info == NULL) {
4138 	    DHD_ERROR((" %s : invalid flow_ring_node \n", __FUNCTION__));
4139 	    return BCME_NOTREADY;
4140 	}
4141 
4142 #ifdef DHD_LOSSLESS_ROAMING
4143 	if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) {
4144 		DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n",
4145 			__FUNCTION__, flow_ring_node->flow_info.tid));
4146 		return BCME_OK;
4147 	}
4148 #endif /* DHD_LOSSLESS_ROAMING */
4149 
4150 	{
4151 		unsigned long flags;
4152 		void *txp = NULL;
4153 		flow_queue_t *queue;
4154 #ifdef DHD_LOSSLESS_ROAMING
4155 		struct ether_header *eh;
4156 		uint8 *pktdata;
4157 #endif /* DHD_LOSSLESS_ROAMING */
4158 
4159 		queue = &flow_ring_node->queue; /* queue associated with flow ring */
4160 
4161 		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
4162 
4163 		if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
4164 			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4165 			return BCME_NOTREADY;
4166 		}
4167 
4168 		while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
4169 			PKTORPHAN(txp);
4170 
4171 			/*
4172 			 * Modifying the packet length caused P2P cert failures.
4173 			 * Specifically, in test cases where a 52-byte packet
4174 			 * was injected, the sniffer capture showed 62 bytes,
4175 			 * which caused the cert tests to fail. So the change
4176 			 * below is made router-specific only.
4177 			 */
4178 
4179 #ifdef DHDTCPACK_SUPPRESS
4180 			if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) {
4181 				ret = dhd_tcpack_check_xmit(bus->dhd, txp);
4182 				if (ret != BCME_OK) {
4183 					DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n",
4184 						__FUNCTION__));
4185 				}
4186 			}
4187 #endif /* DHDTCPACK_SUPPRESS */
4188 #ifdef DHD_LOSSLESS_ROAMING
4189 			pktdata = (uint8 *)PKTDATA(OSH_NULL, txp);
4190 			eh = (struct ether_header *) pktdata;
4191 			if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
4192 				uint8 prio = (uint8)PKTPRIO(txp);
4193 				/* Restore to original priority for 802.1X packet */
4194 				if (prio == PRIO_8021D_NC) {
4195 					PKTSETPRIO(txp, dhdp->prio_8021x);
4196 				}
4197 			}
4198 #endif /* DHD_LOSSLESS_ROAMING */
4199 			/* Attempt to transfer packet over flow ring */
4200 			ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
4201 			if (ret != BCME_OK) { /* may not have resources in flow ring */
4202 				DHD_INFO(("%s: Reinsert %d\n", __FUNCTION__, ret));
4203 				dhd_prot_txdata_write_flush(bus->dhd, flow_id);
4204 				/* reinsert at head */
4205 				dhd_flow_queue_reinsert(bus->dhd, queue, txp);
4206 				DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4207 
4208 				/* If we are able to requeue back, return success */
4209 				return BCME_OK;
4210 			}
4211 		}
4212 
4213 #ifdef DHD_HP2P
4214 		if (!flow_ring_node->hp2p_ring) {
4215 			dhd_prot_txdata_write_flush(bus->dhd, flow_id);
4216 		}
4217 #else
4218 		dhd_prot_txdata_write_flush(bus->dhd, flow_id);
4219 #endif // endif
4220 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4221 	}
4222 
4223 	return ret;
4224 } /* dhd_bus_schedule_queue */
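
/*
 * Illustrative sketch (not part of the driver flow): the scheduler above
 * drains the flow queue into the flow ring and, when the ring runs out of
 * space, reinserts the packet at the head of the queue so nothing is lost
 * or reordered. The shape of that loop, with hypothetical callbacks:
 */
#if 0	/* example only, compiled out */
static void
example_drain_queue(void *q,
	void *(*dequeue)(void *q),
	int (*submit)(void *pkt),
	void (*reinsert_head)(void *q, void *pkt))
{
	void *pkt;
	while ((pkt = dequeue(q)) != NULL) {
		if (submit(pkt) != BCME_OK) {
			reinsert_head(q, pkt);	/* ring full: retry later */
			break;
		}
	}
}
#endif	/* example */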
4225 
4226 /** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */
4227 int BCMFASTPATH
4228 dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
4229 {
4230 	uint16 flowid;
4231 #ifdef IDLE_TX_FLOW_MGMT
4232 	uint8	node_status;
4233 #endif /* IDLE_TX_FLOW_MGMT */
4234 	flow_queue_t *queue;
4235 	flow_ring_node_t *flow_ring_node;
4236 	unsigned long flags;
4237 	int ret = BCME_OK;
4238 	void *txp_pend = NULL;
4239 
4240 	if (!bus->dhd->flowid_allocator) {
4241 		DHD_ERROR(("%s: Flow ring not initialized yet\n", __FUNCTION__));
4242 		goto toss;
4243 	}
4244 
4245 	flowid = DHD_PKT_GET_FLOWID(txp);
4246 
4247 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
4248 
4249 	DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
4250 		__FUNCTION__, flowid, flow_ring_node->status, flow_ring_node->active));
4251 
4252 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
4253 	if ((flowid >= bus->dhd->num_flow_rings) ||
4254 #ifdef IDLE_TX_FLOW_MGMT
4255 		(!flow_ring_node->active))
4256 #else
4257 		(!flow_ring_node->active) ||
4258 		(flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) ||
4259 		(flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING))
4260 #endif /* IDLE_TX_FLOW_MGMT */
4261 	{
4262 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4263 		DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
4264 			__FUNCTION__, flowid, flow_ring_node->status,
4265 			flow_ring_node->active));
4266 		ret = BCME_ERROR;
4267 			goto toss;
4268 	}
4269 
4270 #ifdef IDLE_TX_FLOW_MGMT
4271 	node_status = flow_ring_node->status;
4272 
4273 	/* Handle the different status states here */
4274 	switch (node_status)
4275 	{
4276 		case FLOW_RING_STATUS_OPEN:
4277 
4278 			if (bus->enable_idle_flowring_mgmt) {
4279 				/* Move the node to the head of active list */
4280 				dhd_flow_ring_move_to_active_list_head(bus, flow_ring_node);
4281 			}
4282 			break;
4283 
4284 		case FLOW_RING_STATUS_SUSPENDED:
4285 			DHD_INFO(("Need to initiate TX flow resume\n"));
4286 			/* Issue resume_ring request */
4287 			dhd_bus_flow_ring_resume_request(bus,
4288 					flow_ring_node);
4289 			break;
4290 
4291 		case FLOW_RING_STATUS_CREATE_PENDING:
4292 		case FLOW_RING_STATUS_RESUME_PENDING:
4293 			/* Don't do anything here */
4294 			DHD_INFO(("Waiting for Flow create/resume! status is %u\n",
4295 				node_status));
4296 			break;
4297 
4298 		case FLOW_RING_STATUS_DELETE_PENDING:
4299 		default:
4300 			DHD_ERROR(("Dropping packet!! flowid %u status is %u\n",
4301 				flowid, node_status));
4302 			/* error here!! */
4303 			ret = BCME_ERROR;
4304 			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4305 			goto toss;
4306 	}
4307 	/* Now queue the packet */
4308 #endif /* IDLE_TX_FLOW_MGMT */
4309 
4310 	queue = &flow_ring_node->queue; /* queue associated with flow ring */
4311 
4312 	if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK)
4313 		txp_pend = txp;
4314 
4315 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4316 
4317 	if (flow_ring_node->status) {
4318 		DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
4319 		    __FUNCTION__, flowid, flow_ring_node->status,
4320 		    flow_ring_node->active));
4321 		if (txp_pend) {
4322 			txp = txp_pend;
4323 			goto toss;
4324 		}
4325 		return BCME_OK;
4326 	}
4327 	ret = dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
4328 
4329 	/* If we have anything pending, try to push into q */
4330 	if (txp_pend) {
4331 		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
4332 
4333 		if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
4334 			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4335 			txp = txp_pend;
4336 			goto toss;
4337 		}
4338 
4339 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
4340 	}
4341 
4342 	return ret;
4343 
4344 toss:
4345 	DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
4346 	PKTCFREE(bus->dhd->osh, txp, TRUE);
4347 	return ret;
4348 } /* dhd_bus_txdata */
4349 
4350 void
4351 dhd_bus_stop_queue(struct dhd_bus *bus)
4352 {
4353 	dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
4354 }
4355 
4356 void
4357 dhd_bus_start_queue(struct dhd_bus *bus)
4358 {
4359 	/*
4360 	 * The Tx queue has been stopped either due to a resource shortage
4361 	 * or because the bus is not in a state to be turned on.
4362 	 *
4363 	 * Note that we try to restart the network interface only
4364 	 * when we have enough resources; the flag indicating that all
4365 	 * resources are available must be updated first.
4366 	 */
4367 	if (dhd_prot_check_tx_resource(bus->dhd)) {
4368 		DHD_ERROR(("%s: Interface NOT started, previously stopped "
4369 			"due to resource shortage\n", __FUNCTION__));
4370 		return;
4371 	}
4372 	dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
4373 }
4374 
4375 /* Device console input function */
4376 int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
4377 {
4378 	dhd_bus_t *bus = dhd->bus;
4379 	uint32 addr, val;
4380 	int rv;
4381 	/* Address could be zero if CONSOLE := 0 in dongle Makefile */
4382 	if (bus->console_addr == 0)
4383 		return BCME_UNSUPPORTED;
4384 
4385 	/* Don't allow input if dongle is in reset */
4386 	if (bus->dhd->dongle_reset) {
4387 		return BCME_NOTREADY;
4388 	}
4389 
4390 	/* Zero cbuf_index */
4391 	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
4392 	val = htol32(0);
4393 	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
4394 		goto done;
4395 
4396 	/* Write message into cbuf */
4397 	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
4398 	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
4399 		goto done;
4400 
4401 	/* Write length into vcons_in */
4402 	addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
4403 	val = htol32(msglen);
4404 	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
4405 		goto done;
4406 
4407 	/* Generate an interrupt to the dongle so it processes the console command */
4408 	dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);
4409 done:
4410 	return rv;
4411 } /* dhd_bus_console_in */
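
/*
 * Illustrative sketch (not part of the driver flow): the console-input
 * handshake above is ordering-sensitive - the input index is zeroed and
 * the message bytes are staged in cbuf before the length is posted to
 * vcons_in, so the dongle never sees a length describing bytes that have
 * not landed yet. Restatement with a hypothetical writer standing in for
 * dhdpcie_bus_membytes(bus, TRUE, ...):
 */
#if 0	/* example only, compiled out */
typedef int (*example_write_fn)(uint32 addr, const uint8 *src, uint nbytes);

static int
example_console_in(uint32 cons_base, const uchar *msg, uint msglen,
	example_write_fn wr)
{
	uint32 zero = htol32(0), vlen = htol32(msglen);
	int rv;

	/* 1. reset the dongle's input index */
	if ((rv = wr(cons_base + OFFSETOF(hnd_cons_t, cbuf_idx),
			(const uint8 *)&zero, sizeof(zero))) < 0)
		return rv;
	/* 2. stage the message bytes */
	if ((rv = wr(cons_base + OFFSETOF(hnd_cons_t, cbuf),
			(const uint8 *)msg, msglen)) < 0)
		return rv;
	/* 3. publish the length last */
	return wr(cons_base + OFFSETOF(hnd_cons_t, vcons_in),
			(const uint8 *)&vlen, sizeof(vlen));
}
#endif	/* example */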
4412 
4413 /**
4414  * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is
4415  * contained in 'pkt'. Processes rx frame, forwards up the layer to netif.
4416  */
4417 void BCMFASTPATH
4418 dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count)
4419 {
4420 	dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
4421 }
4422 
4423 void
4424 dhdpcie_setbar1win(dhd_bus_t *bus, uint32 addr)
4425 {
4426 	dhdpcie_os_setbar1win(bus, addr);
4427 }
4428 
4429 /** 'offset' is a backplane address */
4430 void
4431 dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
4432 {
4433 	if (bus->is_linkdown) {
4434 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4435 		return;
4436 	} else {
4437 		dhdpcie_os_wtcm8(bus, offset, data);
4438 	}
4439 }
4440 
4441 uint8
4442 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
4443 {
4444 	volatile uint8 data;
4445 	if (bus->is_linkdown) {
4446 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4447 		data = (uint8)-1;
4448 	} else {
4449 		data = dhdpcie_os_rtcm8(bus, offset);
4450 	}
4451 	return data;
4452 }
4453 
4454 void
4455 dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
4456 {
4457 	if (bus->is_linkdown) {
4458 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4459 		return;
4460 	} else {
4461 		dhdpcie_os_wtcm32(bus, offset, data);
4462 	}
4463 }
4464 void
4465 dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
4466 {
4467 	if (bus->is_linkdown) {
4468 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4469 		return;
4470 	} else {
4471 		dhdpcie_os_wtcm16(bus, offset, data);
4472 	}
4473 }
4474 #ifdef DHD_SUPPORT_64BIT
4475 void
4476 dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
4477 {
4478 	if (bus->is_linkdown) {
4479 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4480 		return;
4481 	} else {
4482 		dhdpcie_os_wtcm64(bus, offset, data);
4483 	}
4484 }
4485 #endif /* DHD_SUPPORT_64BIT */
4486 
4487 uint16
4488 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
4489 {
4490 	volatile uint16 data;
4491 	if (bus->is_linkdown) {
4492 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4493 		data = (uint16)-1;
4494 	} else {
4495 		data = dhdpcie_os_rtcm16(bus, offset);
4496 	}
4497 	return data;
4498 }
4499 
4500 uint32
4501 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
4502 {
4503 	volatile uint32 data;
4504 	if (bus->is_linkdown) {
4505 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4506 		data = (uint32)-1;
4507 	} else {
4508 		data = dhdpcie_os_rtcm32(bus, offset);
4509 	}
4510 	return data;
4511 }
4512 
4513 #ifdef DHD_SUPPORT_64BIT
4514 uint64
4515 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
4516 {
4517 	volatile uint64 data;
4518 	if (bus->is_linkdown) {
4519 		DHD_LOG_MEM(("%s: PCIe link was down\n", __FUNCTION__));
4520 		data = (uint64)-1;
4521 	} else {
4522 		data = dhdpcie_os_rtcm64(bus, offset);
4523 	}
4524 	return data;
4525 }
4526 #endif /* DHD_SUPPORT_64BIT */
4527 
4528 /** A snippet of dongle memory is shared between host and dongle */
4529 void
4530 dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid)
4531 {
4532 	uint64 long_data;
4533 	ulong addr; /* dongle address */
4534 
4535 	DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len));
4536 
4537 	if (bus->is_linkdown) {
4538 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
4539 		return;
4540 	}
4541 
4542 	if (MULTIBP_ENAB(bus->sih)) {
4543 		dhd_bus_pcie_pwr_req(bus);
4544 	}
4545 	switch (type) {
4546 		case D2H_DMA_SCRATCH_BUF:
4547 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer);
4548 			long_data = HTOL64(*(uint64 *)data);
4549 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4550 			if (dhd_msg_level & DHD_INFO_VAL) {
4551 				prhex(__FUNCTION__, data, len);
4552 			}
4553 			break;
4554 
4555 		case D2H_DMA_SCRATCH_BUF_LEN :
4556 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_dma_scratch_buffer_len);
4557 			dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4558 			if (dhd_msg_level & DHD_INFO_VAL) {
4559 				prhex(__FUNCTION__, data, len);
4560 			}
4561 			break;
4562 
4563 		case H2D_DMA_INDX_WR_BUF:
4564 			long_data = HTOL64(*(uint64 *)data);
4565 			addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_w_idx_hostaddr);
4566 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4567 			if (dhd_msg_level & DHD_INFO_VAL) {
4568 				prhex(__FUNCTION__, data, len);
4569 			}
4570 			break;
4571 
4572 		case H2D_DMA_INDX_RD_BUF:
4573 			long_data = HTOL64(*(uint64 *)data);
4574 			addr = DHD_RING_INFO_MEMBER_ADDR(bus, h2d_r_idx_hostaddr);
4575 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4576 			if (dhd_msg_level & DHD_INFO_VAL) {
4577 				prhex(__FUNCTION__, data, len);
4578 			}
4579 			break;
4580 
4581 		case D2H_DMA_INDX_WR_BUF:
4582 			long_data = HTOL64(*(uint64 *)data);
4583 			addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_w_idx_hostaddr);
4584 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4585 			if (dhd_msg_level & DHD_INFO_VAL) {
4586 				prhex(__FUNCTION__, data, len);
4587 			}
4588 			break;
4589 
4590 		case D2H_DMA_INDX_RD_BUF:
4591 			long_data = HTOL64(*(uint64 *)data);
4592 			addr = DHD_RING_INFO_MEMBER_ADDR(bus, d2h_r_idx_hostaddr);
4593 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4594 			if (dhd_msg_level & DHD_INFO_VAL) {
4595 				prhex(__FUNCTION__, data, len);
4596 			}
4597 			break;
4598 
4599 		case H2D_IFRM_INDX_WR_BUF:
4600 			long_data = HTOL64(*(uint64 *)data);
4601 			addr = DHD_RING_INFO_MEMBER_ADDR(bus, ifrm_w_idx_hostaddr);
4602 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8*) &long_data, len);
4603 			if (dhd_msg_level & DHD_INFO_VAL) {
4604 				prhex(__FUNCTION__, data, len);
4605 			}
4606 			break;
4607 
4608 		case RING_ITEM_LEN :
4609 			addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, len_items);
4610 			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4611 			break;
4612 
4613 		case RING_MAX_ITEMS :
4614 			addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, max_item);
4615 			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4616 			break;
4617 
4618 		case RING_BUF_ADDR :
4619 			long_data = HTOL64(*(uint64 *)data);
4620 			addr = DHD_RING_MEM_MEMBER_ADDR(bus, ringid, base_addr);
4621 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
4622 			if (dhd_msg_level & DHD_INFO_VAL) {
4623 				prhex(__FUNCTION__, data, len);
4624 			}
4625 			break;
4626 
4627 		case RING_WR_UPD :
4628 			addr = bus->ring_sh[ringid].ring_state_w;
4629 			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4630 			break;
4631 
4632 		case RING_RD_UPD :
4633 			addr = bus->ring_sh[ringid].ring_state_r;
4634 			dhdpcie_bus_wtcm16(bus, addr, (uint16) HTOL16(*(uint16 *)data));
4635 			break;
4636 
4637 		case D2H_MB_DATA:
4638 			addr = bus->d2h_mb_data_ptr_addr;
4639 			dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4640 			break;
4641 
4642 		case H2D_MB_DATA:
4643 			addr = bus->h2d_mb_data_ptr_addr;
4644 			dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4645 			break;
4646 
4647 		case HOST_API_VERSION:
4648 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_cap);
4649 			dhdpcie_bus_wtcm32(bus, addr, (uint32) HTOL32(*(uint32 *)data));
4650 			break;
4651 
4652 		case DNGL_TO_HOST_TRAP_ADDR:
4653 			long_data = HTOL64(*(uint64 *)data);
4654 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_trap_addr);
4655 			dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *) &long_data, len);
4656 			DHD_INFO(("Wrote trap addr:0x%x\n", (uint32) HTOL32(*(uint32 *)data)));
4657 			break;
4658 
4659 		case HOST_SCB_ADDR:
4660 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_addr);
4661 #ifdef DHD_SUPPORT_64BIT
4662 			dhdpcie_bus_wtcm64(bus, addr, (uint64) HTOL64(*(uint64 *)data));
4663 #else /* !DHD_SUPPORT_64BIT */
4664 			dhdpcie_bus_wtcm32(bus, addr, *((uint32*)data));
4665 #endif /* DHD_SUPPORT_64BIT */
4666 			DHD_INFO(("Wrote host_scb_addr:0x%x\n",
4667 				(uint32) HTOL32(*(uint32 *)data)));
4668 			break;
4669 
4670 		default:
4671 			break;
4672 	}
4673 	if (MULTIBP_ENAB(bus->sih)) {
4674 		dhd_bus_pcie_pwr_req_clear(bus);
4675 	}
4676 } /* dhd_bus_cmn_writeshared */
4677 
4678 /** A snippet of dongle memory is shared between host and dongle */
4679 void
4680 dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
4681 {
4682 	ulong addr; /* dongle address */
4683 
4684 	if (MULTIBP_ENAB(bus->sih)) {
4685 		dhd_bus_pcie_pwr_req(bus);
4686 	}
4687 	switch (type) {
4688 		case RING_WR_UPD :
4689 			addr = bus->ring_sh[ringid].ring_state_w;
4690 			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4691 			break;
4692 
4693 		case RING_RD_UPD :
4694 			addr = bus->ring_sh[ringid].ring_state_r;
4695 			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4696 			break;
4697 
4698 		case TOTAL_LFRAG_PACKET_CNT :
4699 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, total_lfrag_pkt_cnt);
4700 			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4701 			break;
4702 
4703 		case H2D_MB_DATA:
4704 			addr = bus->h2d_mb_data_ptr_addr;
4705 			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
4706 			break;
4707 
4708 		case D2H_MB_DATA:
4709 			addr = bus->d2h_mb_data_ptr_addr;
4710 			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
4711 			break;
4712 
4713 		case MAX_HOST_RXBUFS :
4714 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, max_host_rxbufs);
4715 			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, addr));
4716 			break;
4717 
4718 		case HOST_SCB_ADDR:
4719 			addr = DHD_PCIE_SHARED_MEMBER_ADDR(bus, host_scb_size);
4720 			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, addr));
4721 			break;
4722 
4723 		default :
4724 			break;
4725 	}
4726 	if (MULTIBP_ENAB(bus->sih)) {
4727 		dhd_bus_pcie_pwr_req_clear(bus);
4728 	}
4729 }
4730 
4731 uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus)
4732 {
4733 	return ((pciedev_shared_t*)bus->pcie_sh)->flags;
4734 }
4735 
4736 void
4737 dhd_bus_clearcounts(dhd_pub_t *dhdp)
4738 {
4739 }
4740 
4741 /**
4742  * @param params    input buffer, NULL for 'set' operation.
4743  * @param plen      length of 'params' buffer, 0 for 'set' operation.
4744  * @param arg       output buffer
4745  */
4746 int
4747 dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
4748                  void *params, int plen, void *arg, int len, bool set)
4749 {
4750 	dhd_bus_t *bus = dhdp->bus;
4751 	const bcm_iovar_t *vi = NULL;
4752 	int bcmerror = BCME_UNSUPPORTED;
4753 	int val_size;
4754 	uint32 actionid;
4755 
4756 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4757 
4758 	ASSERT(name);
4759 	ASSERT(len >= 0);
4760 	if (!name || len < 0)
4761 		return BCME_BADARG;
4762 
4763 	/* Get MUST have return space */
4764 	ASSERT(set || (arg && len));
4765 	if (!(set || (arg && len)))
4766 		return BCME_BADARG;
4767 
4768 	/* Set does NOT take qualifiers */
4769 	ASSERT(!set || (!params && !plen));
4770 	if (!(!set || (!params && !plen)))
4771 		return BCME_BADARG;
4772 
4773 	DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
4774 	         name, (set ? "set" : "get"), len, plen));
4775 
4776 	/* Look up var locally; if not found pass to host driver */
4777 	if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
4778 		goto exit;
4779 	}
4780 
4781 	if (MULTIBP_ENAB(bus->sih)) {
4782 		if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) {
4783 			DHD_ERROR(("%s: Bypass pwr request\n", __FUNCTION__));
4784 		} else {
4785 			dhd_bus_pcie_pwr_req(bus);
4786 		}
4787 	}
4788 
4789 	/* set up 'params' pointer in case this is a set command so that
4790 	 * the convenience int and bool code can be common to set and get
4791 	 */
4792 	if (params == NULL) {
4793 		params = arg;
4794 		plen = len;
4795 	}
4796 
4797 	if (vi->type == IOVT_VOID)
4798 		val_size = 0;
4799 	else if (vi->type == IOVT_BUFFER)
4800 		val_size = len;
4801 	else
4802 		/* all other types are integer sized */
4803 		val_size = sizeof(int);
4804 
4805 	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
4806 	bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
4807 
4808 exit:
4809 	/* In DEVRESET_QUIESCE/DEVRESET_ON,
4810 	 * this includes a dongle re-attach, which initializes the pwr_req_ref count to 0 and
4811 	 * causes a pwr_req_ref count mismatch in the power request clear function, leading to a hang.
4812 	 * In this case, bypass the power request clear.
4813 	 */
4814 	if (bcmerror == BCME_DNGL_DEVRESET) {
4815 		bcmerror = BCME_OK;
4816 	} else {
4817 		if (MULTIBP_ENAB(bus->sih)) {
4818 			if (vi->flags & DHD_IOVF_PWRREQ_BYPASS) {
4819 				DHD_ERROR(("%s: Bypass pwr request clear\n", __FUNCTION__));
4820 			} else {
4821 				dhd_bus_pcie_pwr_req_clear(bus);
4822 			}
4823 		}
4824 	}
4825 	return bcmerror;
4826 } /* dhd_bus_iovar_op */
4827 
4828 #ifdef BCM_BUZZZ
4829 #include <bcm_buzzz.h>
4830 
4831 int
4832 dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log,
4833 	const int num_counters)
4834 {
4835 	int bytes = 0;
4836 	uint32 ctr;
4837 	uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX];
4838 	uint32 delta[BCM_BUZZZ_COUNTERS_MAX];
4839 
4840 	/* Compute elapsed counter values per counter event type */
4841 	for (ctr = 0U; ctr < num_counters; ctr++) {
4842 		prev[ctr] = core[ctr];
4843 		curr[ctr] = *log++;
4844 		core[ctr] = curr[ctr];  /* saved for next log */
4845 
4846 		if (curr[ctr] < prev[ctr])
4847 			delta[ctr] = curr[ctr] + (~0U - prev[ctr]);
4848 		else
4849 			delta[ctr] = (curr[ctr] - prev[ctr]);
4850 
4851 		bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
4852 	}
4853 
4854 	return bytes;
4855 }
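
/*
 * Illustrative note (not part of the driver flow): the wrap handling above
 * computes curr + (~0U - prev) when the counter has wrapped. Plain unsigned
 * subtraction is inherently modulo 2^32 and is a common alternative; note
 * that it differs from the form above by exactly one count across a wrap.
 * A minimal sketch with a hypothetical name:
 */
#if 0	/* example only, compiled out */
static uint32
example_counter_delta(uint32 prev, uint32 curr)
{
	return curr - prev;	/* unsigned arithmetic absorbs 32-bit wrap */
}
#endif	/* example */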
4856 
4857 typedef union cm3_cnts { /* export this in bcm_buzzz.h */
4858 	uint32 u32;
4859 	uint8  u8[4];
4860 	struct {
4861 		uint8 cpicnt;
4862 		uint8 exccnt;
4863 		uint8 sleepcnt;
4864 		uint8 lsucnt;
4865 	};
4866 } cm3_cnts_t;
4867 
4868 int
4869 dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log)
4870 {
4871 	int bytes = 0;
4872 
4873 	uint32 cyccnt, instrcnt;
4874 	cm3_cnts_t cm3_cnts;
4875 	uint8 foldcnt;
4876 
4877 	{   /* 32bit cyccnt */
4878 		uint32 curr, prev, delta;
4879 		prev = core[0]; curr = *log++; core[0] = curr;
4880 		if (curr < prev)
4881 			delta = curr + (~0U - prev);
4882 		else
4883 			delta = (curr - prev);
4884 
4885 		bytes += sprintf(p + bytes, "%12u ", delta);
4886 		cyccnt = delta;
4887 	}
4888 
4889 	{	/* Extract the 4 cnts: cpi, exc, sleep and lsu */
4890 		int i;
4891 		uint8 max8 = ~0;
4892 		cm3_cnts_t curr, prev, delta;
4893 		prev.u32 = core[1]; curr.u32 = * log++; core[1] = curr.u32;
4894 		for (i = 0; i < 4; i++) {
4895 			if (curr.u8[i] < prev.u8[i])
4896 				delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
4897 			else
4898 				delta.u8[i] = (curr.u8[i] - prev.u8[i]);
4899 			bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
4900 		}
4901 		cm3_cnts.u32 = delta.u32;
4902 	}
4903 
4904 	{   /* Extract the foldcnt from arg0 */
4905 		uint8 curr, prev, delta, max8 = ~0;
4906 		bcm_buzzz_arg0_t arg0; arg0.u32 = *log;
4907 		prev = core[2]; curr = arg0.klog.cnt; core[2] = curr;
4908 		if (curr < prev)
4909 			delta = curr + (max8 - prev);
4910 		else
4911 			delta = (curr - prev);
4912 		bytes += sprintf(p + bytes, "%4u ", delta);
4913 		foldcnt = delta;
4914 	}
4915 
4916 	instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2]
4917 		                 + cm3_cnts.u8[3]) + foldcnt;
4918 	if (instrcnt > 0xFFFFFF00)
4919 		bytes += sprintf(p + bytes, "[%10s] ", "~");
4920 	else
4921 		bytes += sprintf(p + bytes, "[%10u] ", instrcnt);
4922 	return bytes;
4923 }
4924 
4925 int
4926 dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz)
4927 {
4928 	int bytes = 0;
4929 	bcm_buzzz_arg0_t arg0;
4930 	static uint8 * fmt[] = BCM_BUZZZ_FMT_STRINGS;
4931 
4932 	if (buzzz->counters == 6) {
4933 		bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log);
4934 		log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
4935 	} else {
4936 		bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters);
4937 		log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */
4938 	}
4939 
4940 	/* Dump the logged arguments using the registered formats */
4941 	arg0.u32 = *log++;
4942 
4943 	switch (arg0.klog.args) {
4944 		case 0:
4945 			bytes += sprintf(p + bytes, fmt[arg0.klog.id]);
4946 			break;
4947 		case 1:
4948 		{
4949 			uint32 arg1 = *log++;
4950 			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
4951 			break;
4952 		}
4953 		case 2:
4954 		{
4955 			uint32 arg1, arg2;
4956 			arg1 = *log++; arg2 = *log++;
4957 			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2);
4958 			break;
4959 		}
4960 		case 3:
4961 		{
4962 			uint32 arg1, arg2, arg3;
4963 			arg1 = *log++; arg2 = *log++; arg3 = *log++;
4964 			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3);
4965 			break;
4966 		}
4967 		case 4:
4968 		{
4969 			uint32 arg1, arg2, arg3, arg4;
4970 			arg1 = *log++; arg2 = *log++;
4971 			arg3 = *log++; arg4 = *log++;
4972 			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4);
4973 			break;
4974 		}
4975 		default:
4976 			printf("Maximum of four arguments supported\n");
4977 			break;
4978 	}
4979 
4980 	bytes += sprintf(p + bytes, "\n");
4981 
4982 	return bytes;
4983 }
4984 
4985 void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p)
4986 {
4987 	int i;
4988 	uint32 total, part1, part2, log_sz, core[BCM_BUZZZ_COUNTERS_MAX];
4989 	void * log;
4990 
4991 	for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) {
4992 		core[i] = 0;
4993 	}
4994 
4995 	log_sz = buzzz_p->log_sz;
4996 
4997 	part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz;
4998 
4999 	if (buzzz_p->wrap == TRUE) {
5000 		part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
5001 		total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz;
5002 	} else {
5003 		part2 = 0U;
5004 		total = buzzz_p->count;
5005 	}
5006 
5007 	if (total == 0U) {
5008 		printf("bcm_buzzz_dump total<%u> done\n", total);
5009 		return;
5010 	} else {
5011 		printf("bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n",
5012 		       total, part2, part1);
5013 	}
5014 
5015 	if (part2) {   /* with wrap */
5016 		log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log));
5017 		while (part2--) {   /* from cur to end : part2 */
5018 			p[0] = '\0';
5019 			dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
5020 			printf("%s", p);
5021 			log = (void*)((size_t)log + buzzz_p->log_sz);
5022 		}
5023 	}
5024 
5025 	log = (void*)buffer_p;
5026 	while (part1--) {
5027 		p[0] = '\0';
5028 		dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
5029 		printf("%s", p);
5030 		log = (void*)((size_t)log + buzzz_p->log_sz);
5031 	}
5032 
5033 	printf("bcm_buzzz_dump done.\n");
5034 }
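/*
 * Illustrative sketch (not part of the driver): dumping a wrapped circular
 * log in two passes, oldest entries first, exactly as dhd_buzzz_dump() does
 * above (part2 = cur..end, then part1 = base..cur). The ring layout and
 * names here are invented for the example.
 */
typedef struct buzzz_ring_example {
	uint8 *base;     /* first log entry */
	uint8 *end;      /* one past the last usable entry */
	uint8 *cur;      /* next write position */
	uint32 entry_sz; /* fixed size of one log entry */
	bool wrapped;    /* TRUE once cur has wrapped past end */
} buzzz_ring_example_t;

static void
ring_dump_example(const buzzz_ring_example_t *r, void (*emit)(const uint8 *entry))
{
	const uint8 *p;

	if (r->wrapped) {
		/* part2: cur..end holds the oldest entries */
		for (p = r->cur; p + r->entry_sz <= r->end; p += r->entry_sz)
			emit(p);
	}
	/* part1: base..cur holds the newest entries */
	for (p = r->base; p < r->cur; p += r->entry_sz)
		emit(p);
}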
5035 
5036 int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
5037 {
5038 	bcm_buzzz_t * buzzz_p = NULL;
5039 	void * buffer_p = NULL;
5040 	char * page_p = NULL;
5041 	pciedev_shared_t *sh;
5042 	int ret = 0;
5043 
5044 	if (bus->dhd->busstate != DHD_BUS_DATA) {
5045 		return BCME_UNSUPPORTED;
5046 	}
5047 	if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) {
5048 		printf("Page memory allocation failure\n");
5049 		goto done;
5050 	}
5051 	if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) {
5052 		printf("BCM BUZZZ memory allocation failure\n");
5053 		goto done;
5054 	}
5055 
5056 	ret = dhdpcie_readshared(bus);
5057 	if (ret < 0) {
5058 		DHD_ERROR(("%s: Shared area read failed\n", __FUNCTION__));
5059 		goto done;
5060 	}
5061 
5062 	sh = bus->pcie_sh;
5063 
5064 	DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzz_dbg_ptr));
5065 
5066 	if (sh->buzz_dbg_ptr != 0U) {	/* Fetch and display dongle BUZZZ Trace */
5067 
5068 		dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzz_dbg_ptr,
5069 		                     (uint8 *)buzzz_p, sizeof(bcm_buzzz_t));
5070 
5071 		printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> "
5072 			"count<%u> status<%u> wrap<%u>\n"
5073 			"cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n",
5074 			(int)sh->buzz_dbg_ptr,
5075 			(int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end,
5076 			buzzz_p->count, buzzz_p->status, buzzz_p->wrap,
5077 			buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group,
5078 			buzzz_p->buffer_sz, buzzz_p->log_sz);
5079 
5080 		if (buzzz_p->count == 0) {
5081 			printf("Empty dongle BUZZZ trace\n\n");
5082 			goto done;
5083 		}
5084 
5085 		/* Allocate memory for trace buffer and format strings */
5086 		buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
5087 		if (buffer_p == NULL) {
5088 			printf("Buffer memory allocation failure\n");
5089 			goto done;
5090 		}
5091 
5092 		/* Fetch the trace. format strings are exported via bcm_buzzz.h */
5093 		dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log,   /* Trace */
5094 		                     (uint8 *)buffer_p, buzzz_p->buffer_sz);
5095 
5096 		/* Process and display the trace using formatted output */
5097 
5098 		{
5099 			int ctr;
5100 			for (ctr = 0; ctr < buzzz_p->counters; ctr++) {
5101 				printf("<Evt[%02X]> ", buzzz_p->eventid[ctr]);
5102 			}
5103 			printf("<code execution point>\n");
5104 		}
5105 
5106 		dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
5107 
5108 		printf("----- End of dongle BCM BUZZZ Trace -----\n\n");
5109 
5110 		MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
5111 	}
5112 
5113 done:
5114 
5115 	if (page_p)   MFREE(bus->dhd->osh, page_p, 4096);
5116 	if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); /* before buzzz_p: its size lives there */
5117 	if (buzzz_p)  MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t));
5118 
5119 	return BCME_OK;
5120 }
5121 #endif /* BCM_BUZZZ */
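/*
 * Illustrative sketch (not part of the driver): the descriptor-then-payload
 * fetch pattern used by dhd_buzzz_dump_dngl() above -- read a small
 * device-resident descriptor first, size the host buffer from it, then pull
 * the payload. fetch_bytes()/host_alloc()/host_free() are stand-ins for
 * dhdpcie_bus_membytes()/MALLOC()/MFREE() and are assumptions of this
 * example only.
 */
extern int fetch_bytes(uint32 dev_addr, uint8 *dst, uint len);
extern void *host_alloc(uint len);
extern void host_free(void *p, uint len);

static int
fetch_dongle_blob_example(uint32 desc_addr)
{
	struct { uint32 payload_addr; uint32 payload_sz; } desc;
	uint8 *buf;

	if (fetch_bytes(desc_addr, (uint8 *)&desc, sizeof(desc)) != 0)
		return -1;
	if ((buf = host_alloc(desc.payload_sz)) == NULL)
		return -1;
	if (fetch_bytes(desc.payload_addr, buf, desc.payload_sz) != 0) {
		host_free(buf, desc.payload_sz);
		return -1;
	}
	/* ...format and print the payload... */
	host_free(buf, desc.payload_sz);
	return 0;
}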
5122 
5123 #define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) &&	\
5124 	((sih)->buscoretype == PCIE2_CORE_ID))
5125 #ifdef DHD_PCIE_REG_ACCESS
5126 static bool
5127 pcie2_mdiosetblock(dhd_bus_t *bus, uint blk)
5128 {
5129 	uint mdiodata, mdioctrl, i = 0;
5130 	uint pcie_serdes_spinwait = 200;
5131 
5132 	mdioctrl = MDIOCTL2_DIVISOR_VAL | (0x1F << MDIOCTL2_REGADDR_SHF);
5133 	mdiodata = (blk << MDIODATA2_DEVADDR_SHF) | MDIODATA2_DONE;
5134 
5135 	si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdioctrl);
5136 	si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0, mdiodata);
5137 
5138 	OSL_DELAY(10);
5139 	/* retry till the transaction is complete */
5140 	while (i < pcie_serdes_spinwait) {
5141 		uint mdioctrl_read = si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA,
5142 			0, 0);
5143 		if (!(mdioctrl_read & MDIODATA2_DONE)) {
5144 			break;
5145 		}
5146 		OSL_DELAY(1000);
5147 		i++;
5148 	}
5149 
5150 	if (i >= pcie_serdes_spinwait) {
5151 		DHD_ERROR(("pcie2_mdiosetblock: timed out\n"));
5152 		return FALSE;
5153 	}
5154 
5155 	return TRUE;
5156 }
5157 #endif /* DHD_PCIE_REG_ACCESS */
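/*
 * Illustrative sketch (not part of the driver): the bounded spin-wait used
 * by pcie2_mdiosetblock() above, factored into a generic form. read_status
 * and delay_fn are caller-supplied stand-ins for si_corereg()/OSL_DELAY()
 * and are assumptions of this example, not driver APIs.
 */
static bool
poll_until_bit_clear(uint (*read_status)(void *ctx), void *ctx, uint done_mask,
	uint max_tries, uint delay_us, void (*delay_fn)(uint us))
{
	uint i;

	for (i = 0; i < max_tries; i++) {
		if (!(read_status(ctx) & done_mask))
			return TRUE;	/* hardware cleared DONE: success */
		delay_fn(delay_us);	/* back off before the next read  */
	}
	return FALSE; /* timed out; the caller logs an error and bails */
}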
5158 
5159 #define PCIE_FLR_CAPAB_BIT		28
5160 #define PCIE_FUNCTION_LEVEL_RESET_BIT	15
5161 
5162 /* Change delays only for QT HW; FPGA and silicon use the same delay */
5163 #ifdef BCMQT_HW
5164 #define DHD_FUNCTION_LEVEL_RESET_DELAY		300000u
5165 #define DHD_SSRESET_STATUS_RETRY_DELAY	10000u
5166 #else
5167 #define DHD_FUNCTION_LEVEL_RESET_DELAY	70u	/* 70 msec delay */
5168 #define DHD_SSRESET_STATUS_RETRY_DELAY	40u
5169 #endif /* BCMQT_HW */
5170 /*
5171  * Increase the SSReset de-assert wait to 8 ms (200 retries x 40 us),
5172  * since re-scan can take longer on 4378B0.
5173  */
5174 #define DHD_SSRESET_STATUS_RETRIES	200u
5175 
5176 static void
5177 dhdpcie_enum_reg_init(dhd_bus_t *bus)
5178 {
5179 	/* initialize Function control register (clear bit 4) to HW init value */
5180 	si_corereg(bus->sih, bus->sih->buscoreidx,
5181 		OFFSETOF(sbpcieregs_t, ftn_ctrl.control), ~0,
5182 		PCIE_CPLCA_ENABLE | PCIE_DLY_PERST_TO_COE);
5183 
5184 	/* clear IntMask */
5185 	si_corereg(bus->sih, bus->sih->buscoreidx,
5186 		OFFSETOF(sbpcieregs_t, ftn_ctrl.intmask), ~0, 0);
5187 	/* clear IntStatus */
5188 	si_corereg(bus->sih, bus->sih->buscoreidx,
5189 		OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), ~0,
5190 		si_corereg(bus->sih, bus->sih->buscoreidx,
5191 			OFFSETOF(sbpcieregs_t, ftn_ctrl.intstatus), 0, 0));
5192 
5193 	/* clear MSIVector */
5194 	si_corereg(bus->sih, bus->sih->buscoreidx,
5195 		OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_vector), ~0, 0);
5196 	/* clear MSIIntMask */
5197 	si_corereg(bus->sih, bus->sih->buscoreidx,
5198 		OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intmask), ~0, 0);
5199 	/* clear MSIIntStatus */
5200 	si_corereg(bus->sih, bus->sih->buscoreidx,
5201 		OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), ~0,
5202 		si_corereg(bus->sih, bus->sih->buscoreidx,
5203 			OFFSETOF(sbpcieregs_t, ftn_ctrl.msi_intstatus), 0, 0));
5204 
5205 	/* clear PowerIntMask */
5206 	si_corereg(bus->sih, bus->sih->buscoreidx,
5207 		OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intmask), ~0, 0);
5208 	/* clear PowerIntStatus */
5209 	si_corereg(bus->sih, bus->sih->buscoreidx,
5210 		OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), ~0,
5211 		si_corereg(bus->sih, bus->sih->buscoreidx,
5212 			OFFSETOF(sbpcieregs_t, ftn_ctrl.pwr_intstatus), 0, 0));
5213 
5214 	/* clear MailboxIntMask */
5215 	si_corereg(bus->sih, bus->sih->buscoreidx,
5216 		OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intmask), ~0, 0);
5217 	/* clear MailboxInt */
5218 	si_corereg(bus->sih, bus->sih->buscoreidx,
5219 		OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), ~0,
5220 		si_corereg(bus->sih, bus->sih->buscoreidx,
5221 			OFFSETOF(sbpcieregs_t, ftn_ctrl.mbox_intstatus), 0, 0));
5222 }
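/*
 * Illustrative sketch (not part of the driver): the nested si_corereg()
 * calls above read each interrupt status register and write the value
 * straight back. For write-1-to-clear (W1C) status registers this acks
 * exactly the bits pending at read time; a bit that latches between the
 * read and the write stays set, so no event is lost.
 */
static void
ack_w1c_status_example(volatile uint32 *reg)
{
	uint32 pending = *reg;	/* snapshot the currently pending bits */
	*reg = pending;		/* W1C write clears only those bits    */
}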
5223 
5224 int
5225 dhd_bus_perform_flr(dhd_bus_t *bus, bool force_fail)
5226 {
5227 	uint flr_capab;
5228 	uint val;
5229 	int retry = 0;
5230 
5231 	DHD_ERROR(("******** Perform FLR ********\n"));
5232 
5233 	if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
5234 		if (bus->pcie_mailbox_mask != 0) {
5235 			dhdpcie_bus_intr_disable(bus);
5236 		}
5237 		/* initialize F0 enum registers before FLR for rev66/67 */
5238 		dhdpcie_enum_reg_init(bus);
5239 	}
5240 
5241 	/* Read PCIE_CFG_DEVICE_CAPABILITY bit 28 to check FLR capability */
5242 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CAPABILITY, sizeof(val));
5243 	flr_capab =  val & (1 << PCIE_FLR_CAPAB_BIT);
5244 	DHD_INFO(("Read Device Capability: reg=0x%x read val=0x%x flr_capab=0x%x\n",
5245 		PCIE_CFG_DEVICE_CAPABILITY, val, flr_capab));
5246 	if (!flr_capab) {
5247 		DHD_ERROR(("Chip does not support FLR\n"));
5248 		return BCME_UNSUPPORTED;
5249 	}
5250 	/* WAR: Disable FLR for the H2 chip so that a legacy reset is performed instead */
5251 	else if ((bus->sih->chip == CYW55560_CHIP_ID) || (bus->sih->chip == BCM4375_CHIP_ID)) {
5252 		DHD_INFO(("H2/4375 chip: returning unsupported\n"));
5253 		return BCME_UNSUPPORTED;
5254 	}
5255 
5256 	/* Save pcie config space */
5257 	DHD_INFO(("Save Pcie Config Space\n"));
5258 	DHD_PCIE_CONFIG_SAVE(bus);
5259 
5260 	/* Set bit 15 of PCIE_CFG_DEVICE_CONTROL */
5261 	DHD_INFO(("Set PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
5262 		PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
5263 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
5264 	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
5265 	val = val | (1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
5266 	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
5267 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
5268 
5269 	/* wait for DHD_FUNCTION_LEVEL_RESET_DELAY msec */
5270 	DHD_INFO(("Delay of %d msec\n", DHD_FUNCTION_LEVEL_RESET_DELAY));
5271 	OSL_DELAY(DHD_FUNCTION_LEVEL_RESET_DELAY * 1000u);
5272 
5273 	if (force_fail) {
5274 		DHD_ERROR(("Set PCIE_SSRESET_DISABLE_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)\n",
5275 			PCIE_SSRESET_DISABLE_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
5276 		val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
5277 		DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
5278 			val));
5279 		val = val | (1 << PCIE_SSRESET_DISABLE_BIT);
5280 		DHD_ERROR(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
5281 			val));
5282 		OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val), val);
5283 
5284 		val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
5285 		DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_SUBSYSTEM_CONTROL,
5286 			val));
5287 	}
5288 
5289 	/* Clear bit 15 of PCIE_CFG_DEVICE_CONTROL */
5290 	DHD_INFO(("Clear PCIE_FUNCTION_LEVEL_RESET_BIT(%d) of PCIE_CFG_DEVICE_CONTROL(0x%x)\n",
5291 		PCIE_FUNCTION_LEVEL_RESET_BIT, PCIE_CFG_DEVICE_CONTROL));
5292 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val));
5293 	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
5294 	val = val & ~(1 << PCIE_FUNCTION_LEVEL_RESET_BIT);
5295 	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIE_CFG_DEVICE_CONTROL, val));
5296 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIE_CFG_DEVICE_CONTROL, sizeof(val), val);
5297 
5298 	/* Wait till bit 13 of PCIE_CFG_SUBSYSTEM_CONTROL is cleared */
5299 	DHD_INFO(("Wait till PCIE_SSRESET_STATUS_BIT(%d) of PCIE_CFG_SUBSYSTEM_CONTROL(0x%x)"
5300 		"is cleared\n",	PCIE_SSRESET_STATUS_BIT, PCIE_CFG_SUBSYSTEM_CONTROL));
5301 	do {
5302 		val = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL, sizeof(val));
5303 		DHD_ERROR(("read_config: reg=0x%x read val=0x%x\n",
5304 			PCIE_CFG_SUBSYSTEM_CONTROL, val));
5305 		val = val & (1 << PCIE_SSRESET_STATUS_BIT);
5306 		OSL_DELAY(DHD_SSRESET_STATUS_RETRY_DELAY);
5307 	} while (val && (retry++ < DHD_SSRESET_STATUS_RETRIES));
5308 
5309 	if (val) {
5310 		DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5311 			PCIE_CFG_SUBSYSTEM_CONTROL, PCIE_SSRESET_STATUS_BIT));
5312 		/* User has to fire the IOVAR again, if force_fail is needed */
5313 		if (force_fail) {
5314 			bus->flr_force_fail = FALSE;
5315 			DHD_ERROR(("%s cleared flr_force_fail flag\n", __FUNCTION__));
5316 		}
5317 		return BCME_DONGLE_DOWN;
5318 	}
5319 
5320 	/* Restore pcie config space */
5321 	DHD_INFO(("Restore Pcie Config Space\n"));
5322 	DHD_PCIE_CONFIG_RESTORE(bus);
5323 
5324 	DHD_ERROR(("******** FLR Succeeded ********\n"));
5325 
5326 	return BCME_OK;
5327 }
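/*
 * Illustrative sketch (not part of the driver): the overall shape of the
 * FLR sequence implemented above. cfg_read32/cfg_write32/sleep_ms/sleep_us
 * and the save/restore helpers are stand-ins for OSL_PCI_READ_CONFIG,
 * OSL_PCI_WRITE_CONFIG, OSL_DELAY and DHD_PCIE_CONFIG_SAVE/RESTORE; they
 * are assumptions of this example only.
 */
extern uint32 cfg_read32(uint32 reg);
extern void cfg_write32(uint32 reg, uint32 val);
extern void sleep_ms(uint32 ms);
extern void sleep_us(uint32 us);
extern void save_pci_config(void);
extern void restore_pci_config(void);

static int
flr_sketch(void)
{
	uint32 val;
	uint retry;

	if (!(cfg_read32(PCIE_CFG_DEVICE_CAPABILITY) & (1u << PCIE_FLR_CAPAB_BIT)))
		return -1;				/* no FLR support */

	save_pci_config();				/* restored at the end */

	val = cfg_read32(PCIE_CFG_DEVICE_CONTROL);
	cfg_write32(PCIE_CFG_DEVICE_CONTROL,		/* assert FLR */
		val | (1u << PCIE_FUNCTION_LEVEL_RESET_BIT));
	sleep_ms(DHD_FUNCTION_LEVEL_RESET_DELAY);
	cfg_write32(PCIE_CFG_DEVICE_CONTROL,		/* de-assert FLR */
		val & ~(1u << PCIE_FUNCTION_LEVEL_RESET_BIT));

	/* wait for the subsystem reset status bit to clear */
	for (retry = 0; retry < DHD_SSRESET_STATUS_RETRIES; retry++) {
		if (!(cfg_read32(PCIE_CFG_SUBSYSTEM_CONTROL) &
		      (1u << PCIE_SSRESET_STATUS_BIT)))
			break;
		sleep_us(DHD_SSRESET_STATUS_RETRY_DELAY);
	}
	if (retry >= DHD_SSRESET_STATUS_RETRIES)
		return -1;				/* dongle did not recover */

	restore_pci_config();
	return 0;
}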
5328 
5329 #ifdef DHD_USE_BP_RESET
5330 #define DHD_BP_RESET_ASPM_DISABLE_DELAY	500u	/* usec */
5331 
5332 #define DHD_BP_RESET_STATUS_RETRY_DELAY	40u	/* usec */
5333 #define DHD_BP_RESET_STATUS_RETRIES	50u
5334 
5335 #define PCIE_CFG_SPROM_CTRL_SB_RESET_BIT	10
5336 #define PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT	21
5337 int
5338 dhd_bus_perform_bp_reset(struct dhd_bus *bus)
5339 {
5340 	uint val;
5341 	int retry = 0;
5342 	uint dar_clk_ctrl_status_reg = DAR_CLK_CTRL(bus->sih->buscorerev);
5343 	int ret = BCME_OK;
5344 	bool cond;
5345 
5346 	DHD_ERROR(("******** Perform BP reset ********\n"));
5347 
5348 	/* Disable ASPM */
5349 	DHD_INFO(("Disable ASPM: Clear bits(1-0) of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
5350 		PCIECFGREG_LINK_STATUS_CTRL));
5351 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
5352 	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5353 	val = val & (~PCIE_ASPM_ENAB);
5354 	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5355 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
5356 
5357 	/* wait for delay usec */
5358 	DHD_INFO(("Delay of %d usec\n", DHD_BP_RESET_ASPM_DISABLE_DELAY));
5359 	OSL_DELAY(DHD_BP_RESET_ASPM_DISABLE_DELAY);
5360 
5361 	/* Set bit 10 of PCIECFGREG_SPROM_CTRL */
5362 	DHD_INFO(("Set PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of PCIECFGREG_SPROM_CTRL(0x%x)\n",
5363 		PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
5364 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
5365 	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
5366 	val = val | (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
5367 	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
5368 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val), val);
5369 
5370 	/* Wait till the backplane reset is ASSERTED, i.e.
5371 	 * bit 10 of PCIECFGREG_SPROM_CTRL is cleared.
5372 	 * Only after this is polling bit 21 of the DAR reg (0xAE0) valid;
5373 	 * otherwise the DAR register reads back its previous, stale value.
5374 	 */
5375 	DHD_INFO(("Wait till PCIE_CFG_SPROM_CTRL_SB_RESET_BIT(%d) of "
5376 		"PCIECFGREG_SPROM_CTRL(0x%x) is cleared\n",
5377 		PCIE_CFG_SPROM_CTRL_SB_RESET_BIT, PCIECFGREG_SPROM_CTRL));
5378 	do {
5379 		val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_SPROM_CTRL, sizeof(val));
5380 		DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_SPROM_CTRL, val));
5381 		cond = val & (1 << PCIE_CFG_SPROM_CTRL_SB_RESET_BIT);
5382 		OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
5383 	} while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
5384 
5385 	if (cond) {
5386 		DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5387 			PCIECFGREG_SPROM_CTRL, PCIE_CFG_SPROM_CTRL_SB_RESET_BIT));
5388 		ret = BCME_ERROR;
5389 		goto aspm_enab;
5390 	}
5391 
5392 	/* Wait till bit 21 of dar_clk_ctrl_status_reg is cleared */
5393 	DHD_INFO(("Wait till PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT(%d) of "
5394 		"dar_clk_ctrl_status_reg(0x%x) is cleared\n",
5395 		PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT, dar_clk_ctrl_status_reg));
5396 	do {
5397 		val = si_corereg(bus->sih, bus->sih->buscoreidx,
5398 			dar_clk_ctrl_status_reg, 0, 0);
5399 		DHD_INFO(("read_dar si_corereg: reg=0x%x read val=0x%x\n",
5400 			dar_clk_ctrl_status_reg, val));
5401 		cond = val & (1 << PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT);
5402 		OSL_DELAY(DHD_BP_RESET_STATUS_RETRY_DELAY);
5403 	} while (cond && (retry++ < DHD_BP_RESET_STATUS_RETRIES));
5404 
5405 	if (cond) {
5406 		DHD_ERROR(("ERROR: reg=0x%x bit %d is not cleared\n",
5407 			dar_clk_ctrl_status_reg, PCIE_CFG_CLOCK_CTRL_STATUS_BP_RESET_BIT));
5408 		ret = BCME_ERROR;
5409 	}
5410 
5411 aspm_enab:
5412 	/* Enable ASPM */
5413 	DHD_INFO(("Enable ASPM: set bit 1 of PCIECFGREG_LINK_STATUS_CTRL(0x%x)\n",
5414 		PCIECFGREG_LINK_STATUS_CTRL));
5415 	val = OSL_PCI_READ_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val));
5416 	DHD_INFO(("read_config: reg=0x%x read val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5417 	val = val | (PCIE_ASPM_L1_ENAB);
5418 	DHD_INFO(("write_config: reg=0x%x write val=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, val));
5419 	OSL_PCI_WRITE_CONFIG(bus->osh, PCIECFGREG_LINK_STATUS_CTRL, sizeof(val), val);
5420 
5421 	DHD_ERROR(("******** BP reset Succeeded ********\n"));
5422 
5423 	return ret;
5424 }
5425 #endif /* DHD_USE_BP_RESET */
5426 
5427 int
5428 dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
5429 {
5430 	dhd_bus_t *bus = dhdp->bus;
5431 	int bcmerror = 0;
5432 	unsigned long flags;
5433 	unsigned long flags_bus;
5434 #ifdef CONFIG_ARCH_MSM
5435 	int retry = POWERUP_MAX_RETRY;
5436 #endif /* CONFIG_ARCH_MSM */
5437 
5438 	if (flag == TRUE) { /* Turn off WLAN */
5439 		/* Removing Power */
5440 		DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
5441 		DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
5442 		bus->dhd->up = FALSE;
5443 
5444 		/* Wait for other contexts to finish -- if required, a call
5445 		* to OSL_DELAY for 1 s can be added here to give other contexts
5446 		* a chance to finish.
5447 		*/
5448 		dhdpcie_advertise_bus_cleanup(bus->dhd);
5449 
5450 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
5451 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5452 			atomic_set(&bus->dhd->block_bus, TRUE);
5453 			dhd_flush_rx_tx_wq(bus->dhd);
5454 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5455 
5456 #ifdef BCMPCIE_OOB_HOST_WAKE
5457 			/* Clean up any pending host wake IRQ */
5458 			dhd_bus_oob_intr_set(bus->dhd, FALSE);
5459 			dhd_bus_oob_intr_unregister(bus->dhd);
5460 #endif /* BCMPCIE_OOB_HOST_WAKE */
5461 			dhd_os_wd_timer(dhdp, 0);
5462 			dhd_bus_stop(bus, TRUE);
5463 			if (bus->intr) {
5464 				DHD_BUS_LOCK(bus->bus_lock, flags_bus);
5465 				dhdpcie_bus_intr_disable(bus);
5466 				DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
5467 				dhdpcie_free_irq(bus);
5468 			}
5469 			dhd_deinit_bus_lock(bus);
5470 			dhd_deinit_backplane_access_lock(bus);
5471 			dhd_bus_release_dongle(bus);
5472 			dhdpcie_bus_free_resource(bus);
5473 			bcmerror = dhdpcie_bus_disable_device(bus);
5474 			if (bcmerror) {
5475 				DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
5476 					__FUNCTION__, bcmerror));
5477 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5478 				atomic_set(&bus->dhd->block_bus, FALSE);
5479 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5480 			}
5481 			/* Clean up protocol data after the Bus Master Enable bit is
5482 			 * cleared, so that the host can safely unmap DMA and remove the
5483 			 * allocated buffers from the PKTID MAP. Some application
5484 			 * processors with a System MMU trigger a kernel panic when they
5485 			 * detect an access to DMA-unmapped memory from a device behind
5486 			 * the System MMU. Such a panic is possible here because the
5487 			 * dongle may still access DMA-unmapped memory after
5488 			 * dhd_prot_reset() runs.
5489 			 * For this reason, dhd_prot_reset() and dhd_clear() must be
5490 			 * called after dhdpcie_bus_disable_device().
5491 			 */
5492 			dhd_prot_reset(dhdp);
5493 			dhd_clear(dhdp);
5494 #ifdef CONFIG_ARCH_MSM
5495 			bcmerror = dhdpcie_bus_clock_stop(bus);
5496 			if (bcmerror) {
5497 				DHD_ERROR(("%s: host clock stop failed: %d\n",
5498 					__FUNCTION__, bcmerror));
5499 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5500 				atomic_set(&bus->dhd->block_bus, FALSE);
5501 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5502 				goto done;
5503 			}
5504 #endif /* CONFIG_ARCH_MSM */
5505 			DHD_GENERAL_LOCK(bus->dhd, flags);
5506 			DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
5507 			bus->dhd->busstate = DHD_BUS_DOWN;
5508 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
5509 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
5510 			atomic_set(&bus->dhd->block_bus, FALSE);
5511 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
5512 		} else {
5513 			if (bus->intr) {
5514 				dhdpcie_free_irq(bus);
5515 			}
5516 #ifdef BCMPCIE_OOB_HOST_WAKE
5517 			/* Clean up any pending host wake IRQ */
5518 			dhd_bus_oob_intr_set(bus->dhd, FALSE);
5519 			dhd_bus_oob_intr_unregister(bus->dhd);
5520 #endif /* BCMPCIE_OOB_HOST_WAKE */
5521 			dhd_dpc_kill(bus->dhd);
5522 			if (!bus->no_bus_init) {
5523 				dhd_bus_release_dongle(bus);
5524 				dhdpcie_bus_free_resource(bus);
5525 				bcmerror = dhdpcie_bus_disable_device(bus);
5526 				if (bcmerror) {
5527 					DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
5528 						__FUNCTION__, bcmerror));
5529 				}
5530 
5531 				/* Clean up protocol data after the Bus Master Enable bit
5532 				 * is cleared, so that the host can safely unmap DMA and
5533 				 * remove the allocated buffers from the PKTID MAP. Some
5534 				 * application processors with a System MMU trigger a
5535 				 * kernel panic when they detect an access to DMA-unmapped
5536 				 * memory from a device behind the System MMU.
5537 				 * Such a panic is possible here because the dongle may
5538 				 * still access DMA-unmapped memory after dhd_prot_reset()
5539 				 * runs.
5540 				 * For this reason, dhd_prot_reset() and dhd_clear() must
5541 				 * be called after dhdpcie_bus_disable_device().
5542 				 */
5543 				dhd_prot_reset(dhdp);
5544 				dhd_clear(dhdp);
5545 			} else {
5546 				bus->no_bus_init = FALSE;
5547 			}
5548 #ifdef CONFIG_ARCH_MSM
5549 			bcmerror = dhdpcie_bus_clock_stop(bus);
5550 			if (bcmerror) {
5551 				DHD_ERROR(("%s: host clock stop failed: %d\n",
5552 					__FUNCTION__, bcmerror));
5553 				goto done;
5554 			}
5555 #endif  /* CONFIG_ARCH_MSM */
5556 		}
5557 
5558 		bus->dhd->dongle_reset = TRUE;
5559 		DHD_ERROR(("%s:  WLAN OFF Done\n", __FUNCTION__));
5560 
5561 	} else { /* Turn on WLAN */
5562 		if (bus->dhd->busstate == DHD_BUS_DOWN) {
5563 			/* Powering On */
5564 			DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
5565 #ifdef CONFIG_ARCH_MSM
5566 			while (--retry) {
5567 				bcmerror = dhdpcie_bus_clock_start(bus);
5568 				if (!bcmerror) {
5569 					DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
5570 						__FUNCTION__));
5571 					break;
5572 				} else {
5573 					OSL_SLEEP(10);
5574 				}
5575 			}
5576 
5577 			if (bcmerror && !retry) {
5578 				DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
5579 					__FUNCTION__, bcmerror));
5580 				goto done;
5581 			}
5582 #if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
5583 			dhd_bus_aspm_enable_rc_ep(bus, FALSE);
5584 #endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
5585 #endif /* CONFIG_ARCH_MSM */
5586 			bus->is_linkdown = 0;
5587 			bus->cto_triggered = 0;
5588 #ifdef SUPPORT_LINKDOWN_RECOVERY
5589 			bus->read_shm_fail = FALSE;
5590 #endif /* SUPPORT_LINKDOWN_RECOVERY */
5591 			bcmerror = dhdpcie_bus_enable_device(bus);
5592 			if (bcmerror) {
5593 				DHD_ERROR(("%s: host configuration restore failed: %d\n",
5594 					__FUNCTION__, bcmerror));
5595 				goto done;
5596 			}
5597 
5598 			bcmerror = dhdpcie_bus_alloc_resource(bus);
5599 			if (bcmerror) {
5600 				DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
5601 					__FUNCTION__, bcmerror));
5602 				goto done;
5603 			}
5604 
5605 			bcmerror = dhdpcie_bus_dongle_attach(bus);
5606 			if (bcmerror) {
5607 				DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
5608 					__FUNCTION__, bcmerror));
5609 				goto done;
5610 			}
5611 
5612 			bcmerror = dhd_bus_request_irq(bus);
5613 			if (bcmerror) {
5614 				DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
5615 					__FUNCTION__, bcmerror));
5616 				goto done;
5617 			}
5618 
5619 			bus->dhd->dongle_reset = FALSE;
5620 
5621 #if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
5622 			dhd_irq_set_affinity(bus->dhd, cpumask_of(1));
5623 #endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
5624 
5625 			bcmerror = dhd_bus_start(dhdp);
5626 			if (bcmerror) {
5627 				DHD_ERROR(("%s: dhd_bus_start: %d\n",
5628 					__FUNCTION__, bcmerror));
5629 				goto done;
5630 			}
5631 
5632 			bus->dhd->up = TRUE;
5633 			/* Re-enable the watchdog, which was disabled in dhdpcie_advertise_bus_cleanup */
5634 			if (bus->dhd->dhd_watchdog_ms_backup) {
5635 				DHD_ERROR(("%s: Enabling wdtick after dhd init\n",
5636 					__FUNCTION__));
5637 				dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
5638 			}
5639 			DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
5640 		} else {
5641 			DHD_ERROR(("%s: bus not in DOWN state; ignoring power-on request\n", __FUNCTION__));
5642 			goto done;
5643 		}
5644 	}
5645 
5646 done:
5647 	if (bcmerror) {
5648 		DHD_GENERAL_LOCK(bus->dhd, flags);
5649 		DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
5650 		bus->dhd->busstate = DHD_BUS_DOWN;
5651 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
5652 	}
5653 	return bcmerror;
5654 }
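/*
 * Usage sketch (illustrative): the devreset on/off round trip that the
 * IOV_DEVRESET iovar below drives through dhd_bus_devreset(). Error
 * handling is trimmed; wifi_power_cycle_example() is hypothetical.
 */
static int
wifi_power_cycle_example(dhd_pub_t *dhdp)
{
	int err;

	err = dhd_bus_devreset(dhdp, TRUE);	/* WLAN off: bus torn down */
	if (err)
		return err;
	/* ...platform power/clock toggling would happen here... */
	return dhd_bus_devreset(dhdp, FALSE);	/* WLAN on: re-enable device,
						 * reattach dongle, restart bus
						 */
}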
5655 
5656 #ifdef DHD_PCIE_REG_ACCESS
5657 static int
5658 pcie2_mdioop(dhd_bus_t *bus, uint physmedia, uint regaddr, bool write, uint *val,
5659 	bool slave_bypass)
5660 {
5661 	uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl;
5662 	uint32 reg32;
5663 
5664 	pcie2_mdiosetblock(bus, physmedia);
5665 
5666 	/* enable mdio access to SERDES */
5667 	mdio_ctrl = MDIOCTL2_DIVISOR_VAL;
5668 	mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF);
5669 
5670 	if (slave_bypass)
5671 		mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS;
5672 
5673 	if (!write)
5674 		mdio_ctrl |= MDIOCTL2_READ;
5675 
5676 	si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdio_ctrl);
5677 
5678 	if (write) {
5679 		reg32 =  PCIE2_MDIO_WR_DATA;
5680 		si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0,
5681 			*val | MDIODATA2_DONE);
5682 	} else
5683 		reg32 =  PCIE2_MDIO_RD_DATA;
5684 
5685 	/* retry till the transaction is complete */
5686 	while (i < pcie_serdes_spinwait) {
5687 		uint done_val =  si_corereg(bus->sih, bus->sih->buscoreidx, reg32, 0, 0);
5688 		if (!(done_val & MDIODATA2_DONE)) {
5689 			if (!write) {
5690 				*val = si_corereg(bus->sih, bus->sih->buscoreidx,
5691 					PCIE2_MDIO_RD_DATA, 0, 0);
5692 				*val = *val & MDIODATA2_MASK;
5693 			}
5694 			return 0;
5695 		}
5696 		OSL_DELAY(1000);
5697 		i++;
5698 	}
5699 	return -1;
5700 }
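/*
 * Usage sketch (illustrative): a read-modify-write of one SerDes register
 * through pcie2_mdioop(), relying on its 0/-1 return convention. The block
 * (0x1F) and register (0x02) numbers are placeholders, not known-good
 * values for any particular PHY.
 */
static void
serdes_rmw_example(dhd_bus_t *bus)
{
	uint val = 0;

	if (pcie2_mdioop(bus, 0x1F, 0x02, FALSE, &val, FALSE) == 0) {
		val |= 0x1;	/* set the example bit */
		(void)pcie2_mdioop(bus, 0x1F, 0x02, TRUE, &val, FALSE);
	}
}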
5701 #endif /* DHD_PCIE_REG_ACCESS */
5702 
5703 /* si_backplane_access() manages a shared resource - the BAR0 mapping -
5704  * hence its calls must be serialized. This wrapper provides that
5705  * serialization and shall be used everywhere instead of calling
5706  * si_backplane_access() directly.
5707  *
5708  * The Linux DHD driver calls si_backplane_access() from three contexts:
5709  * tasklet (which may invoke dhdpcie_sssr_dump()), iovar handlers ("sbreg",
5710  * "membytes", etc.) and procfs (used by the GDB proxy). To avoid race
5711  * conditions, calls to si_backplane_access() must be serialized. The
5712  * presence of a tasklet context implies the serialization must be
5713  * spinlock-based; hence the Linux implementation of
5714  * dhd_pcie_backplane_access_[un]lock() uses a spinlock.
5715  *
5716  * Other platforms may add their own implementations of
5717  * dhd_pcie_backplane_access_[un]lock() as needed (e.g. empty if
5718  * serialization is not required). */
5719 static uint
5720 serialized_backplane_access(dhd_bus_t *bus, uint addr, uint size, uint *val, bool read)
5721 {
5722 	uint ret;
5723 	unsigned long flags;
5724 	DHD_BACKPLANE_ACCESS_LOCK(bus->backplane_access_lock, flags);
5725 	ret = si_backplane_access(bus->sih, addr, size, val, read);
5726 	DHD_BACKPLANE_ACCESS_UNLOCK(bus->backplane_access_lock, flags);
5727 	return ret;
5728 }
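/*
 * Usage sketch (illustrative): all backplane reads/writes funnel through
 * the serialized wrapper so that tasklet, iovar and procfs contexts cannot
 * race on the shared BAR0 window. The 0x18000000 address is a placeholder.
 */
static uint
read_backplane_word_example(dhd_bus_t *bus)
{
	uint val = 0;

	(void)serialized_backplane_access(bus, 0x18000000u, sizeof(val),
		&val, TRUE /* read */);
	return val;
}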
5729 
5730 static int
5731 dhdpcie_get_dma_ring_indices(dhd_pub_t *dhd)
5732 {
5733 	int h2d_support, d2h_support;
5734 
5735 	d2h_support = dhd->dma_d2h_ring_upd_support ? 1 : 0;
5736 	h2d_support = dhd->dma_h2d_ring_upd_support ? 1 : 0;
5737 	return (d2h_support | (h2d_support << 1));
5738 
5739 }
5740 int
5741 dhdpcie_set_dma_ring_indices(dhd_pub_t *dhd, int32 int_val)
5742 {
5743 	int bcmerror = 0;
5744 	/* Can change it only during initialization/FW download */
5745 	if (dhd->busstate == DHD_BUS_DOWN) {
5746 		if ((int_val > 3) || (int_val < 0)) {
5747 			DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
5748 			bcmerror = BCME_BADARG;
5749 		} else {
5750 			dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
5751 			dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
5752 			dhd->dma_ring_upd_overwrite = TRUE;
5753 		}
5754 	} else {
5755 		DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
5756 			__FUNCTION__));
5757 		bcmerror = BCME_NOTDOWN;
5758 	}
5759 
5760 	return bcmerror;
5761 
5762 }
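/*
 * Illustrative sketch (not part of the driver): the 2-bit encoding shared
 * by the two functions above. Bit 0 carries D2H ring-update support and
 * bit 1 carries H2D, so the only legal values are 0..3.
 */
static int
encode_ring_indices_example(bool d2h, bool h2d)
{
	return (d2h ? 1 : 0) | ((h2d ? 1 : 0) << 1);
}

static void
decode_ring_indices_example(int v, bool *d2h, bool *h2d)
{
	*d2h = (v & 1) ? TRUE : FALSE;
	*h2d = (v & 2) ? TRUE : FALSE;
}
/* e.g. encode(TRUE, TRUE) == 3; decoding 2 yields d2h=FALSE, h2d=TRUE */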
5763 /**
5764  * IOVAR handler of the DHD bus layer (in this case, the PCIe bus).
5765  *
5766  * @param actionid  e.g. IOV_SVAL(IOV_PCIEREG)
5767  * @param params    input buffer
5768  * @param plen      length in [bytes] of input buffer 'params'
5769  * @param arg       output buffer
5770  * @param len       length in [bytes] of output buffer 'arg'
5771  */
5772 static int
5773 dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
5774                 void *params, int plen, void *arg, int len, int val_size)
5775 {
5776 	int bcmerror = 0;
5777 	int32 int_val = 0;
5778 	int32 int_val2 = 0;
5779 	int32 int_val3 = 0;
5780 	bool bool_val = 0;
5781 
5782 	DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
5783 	           __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
5784 
5785 	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
5786 		goto exit;
5787 
5788 	if (plen >= (int)sizeof(int_val))
5789 		bcopy(params, &int_val, sizeof(int_val));
5790 
5791 	if (plen >= (int)sizeof(int_val) * 2)
5792 		bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
5793 
5794 	if (plen >= (int)sizeof(int_val) * 3)
5795 		bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3));
5796 
5797 	bool_val = (int_val != 0) ? TRUE : FALSE;
5798 
5799 	/* Check if dongle is in reset. If so, only allow DEVRESET iovars */
5800 	if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
5801 	                                actionid == IOV_GVAL(IOV_DEVRESET))) {
5802 		bcmerror = BCME_NOTREADY;
5803 		goto exit;
5804 	}
5805 
5806 	switch (actionid) {
5807 
5808 	case IOV_SVAL(IOV_VARS):
5809 		bcmerror = dhdpcie_downloadvars(bus, arg, len);
5810 		break;
5811 #ifdef DHD_PCIE_REG_ACCESS
5812 	case IOV_SVAL(IOV_PCIEREG):
5813 		si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
5814 			int_val);
5815 		si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
5816 			int_val2);
5817 		break;
5818 
5819 	case IOV_GVAL(IOV_PCIEREG):
5820 		si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
5821 			int_val);
5822 		int_val = si_corereg(bus->sih, bus->sih->buscoreidx,
5823 			OFFSETOF(sbpcieregs_t, configdata), 0, 0);
5824 		bcopy(&int_val, arg, sizeof(int_val));
5825 		break;
5826 
5827 	case IOV_SVAL(IOV_PCIECOREREG):
5828 		si_corereg(bus->sih, bus->sih->buscoreidx, int_val, ~0, int_val2);
5829 		break;
5830 	case IOV_GVAL(IOV_BAR0_SECWIN_REG):
5831 	{
5832 		sdreg_t sdreg;
5833 		uint32 addr, size;
5834 
5835 		bcopy(params, &sdreg, sizeof(sdreg));
5836 
5837 		addr = sdreg.offset;
5838 		size = sdreg.func;
5839 
5840 		if (serialized_backplane_access(bus, addr, size, (uint *)&int_val, TRUE) != BCME_OK)
5841 		{
5842 			DHD_ERROR(("Invalid size/addr combination \n"));
5843 			bcmerror = BCME_ERROR;
5844 			break;
5845 		}
5846 		bcopy(&int_val, arg, sizeof(int32));
5847 		break;
5848 	}
5849 
5850 	case IOV_SVAL(IOV_BAR0_SECWIN_REG):
5851 	{
5852 		sdreg_t sdreg;
5853 		uint32 addr, size;
5854 
5855 		bcopy(params, &sdreg, sizeof(sdreg));
5856 
5857 		addr = sdreg.offset;
5858 		size = sdreg.func;
5859 		if (serialized_backplane_access(bus, addr, size,
5860 			(uint *)(&sdreg.value), FALSE) != BCME_OK) {
5861 			DHD_ERROR(("Invalid size/addr combination \n"));
5862 			bcmerror = BCME_ERROR;
5863 		}
5864 		break;
5865 	}
5866 
5867 	case IOV_GVAL(IOV_SBREG):
5868 	{
5869 		sdreg_t sdreg;
5870 		uint32 addr, size;
5871 
5872 		bcopy(params, &sdreg, sizeof(sdreg));
5873 
5874 		addr = sdreg.offset | SI_ENUM_BASE(bus->sih);
5875 		size = sdreg.func;
5876 
5877 		if (serialized_backplane_access(bus, addr, size, (uint *)&int_val, TRUE) != BCME_OK)
5878 		{
5879 			DHD_ERROR(("Invalid size/addr combination \n"));
5880 			bcmerror = BCME_ERROR;
5881 			break;
5882 		}
5883 		bcopy(&int_val, arg, size);
5884 		break;
5885 	}
5886 
5887 	case IOV_SVAL(IOV_SBREG):
5888 	{
5889 		sdreg_t sdreg;
5890 		uint32 addr, size;
5891 
5892 		bcopy(params, &sdreg, sizeof(sdreg));
5893 
5894 		addr = sdreg.offset | SI_ENUM_BASE(bus->sih);
5895 		size = sdreg.func;
5896 		if (serialized_backplane_access(bus, addr, size,
5897 			(uint *)(&sdreg.value), FALSE) != BCME_OK) {
5898 			DHD_ERROR(("Invalid size/addr combination \n"));
5899 			bcmerror = BCME_ERROR;
5900 		}
5901 		break;
5902 	}
5903 
5904 	case IOV_GVAL(IOV_PCIESERDESREG):
5905 	{
5906 		uint val;
5907 		if (!PCIE_GEN2(bus->sih)) {
5908 			DHD_ERROR(("supported only in pcie gen2\n"));
5909 			bcmerror = BCME_ERROR;
5910 			break;
5911 		}
5912 
5913 		if (!pcie2_mdioop(bus, int_val, int_val2, FALSE, &val, FALSE)) {
5914 			bcopy(&val, arg, sizeof(int32));
5915 		} else {
5916 			DHD_ERROR(("pcie2_mdioop failed.\n"));
5917 			bcmerror = BCME_ERROR;
5918 		}
5919 		break;
5920 	}
5921 
5922 	case IOV_SVAL(IOV_PCIESERDESREG):
5923 		if (!PCIE_GEN2(bus->sih)) {
5924 			DHD_ERROR(("supported only in pcie gen2\n"));
5925 			bcmerror = BCME_ERROR;
5926 			break;
5927 		}
5928 		if (pcie2_mdioop(bus, int_val, int_val2, TRUE, (uint *)&int_val3, FALSE)) {
5929 			DHD_ERROR(("pcie2_mdioop failed.\n"));
5930 			bcmerror = BCME_ERROR;
5931 		}
5932 		break;
5933 	case IOV_GVAL(IOV_PCIECOREREG):
5934 		int_val = si_corereg(bus->sih, bus->sih->buscoreidx, int_val, 0, 0);
5935 		bcopy(&int_val, arg, sizeof(int_val));
5936 		break;
5937 
5938 	case IOV_SVAL(IOV_PCIECFGREG):
5939 		OSL_PCI_WRITE_CONFIG(bus->osh, int_val, 4, int_val2);
5940 		break;
5941 
5942 	case IOV_GVAL(IOV_PCIECFGREG):
5943 		int_val = OSL_PCI_READ_CONFIG(bus->osh, int_val, 4);
5944 		bcopy(&int_val, arg, sizeof(int_val));
5945 		break;
5946 #endif /* DHD_PCIE_REG_ACCESS */
5947 	case IOV_SVAL(IOV_PCIE_LPBK):
5948 		bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
5949 		break;
5950 
5951 	case IOV_SVAL(IOV_PCIE_DMAXFER): {
5952 		dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)arg;
5953 
5954 		if (!dmaxfer)
5955 			return BCME_BADARG;
5956 		if (dmaxfer->version != DHD_DMAXFER_VERSION)
5957 			return BCME_VERSION;
5958 		if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
5959 			return BCME_BADLEN;
5960 		}
5961 
5962 		bcmerror = dhdpcie_bus_dmaxfer_req(bus, dmaxfer->num_bytes,
5963 				dmaxfer->src_delay, dmaxfer->dest_delay,
5964 				dmaxfer->type, dmaxfer->core_num,
5965 				dmaxfer->should_wait);
5966 
5967 		if (dmaxfer->should_wait && bcmerror >= 0) {
5968 			bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
5969 		}
5970 		break;
5971 	}
5972 
5973 	case IOV_GVAL(IOV_PCIE_DMAXFER): {
5974 		dma_xfer_info_t *dmaxfer = (dma_xfer_info_t *)params;
5975 		if (!dmaxfer)
5976 			return BCME_BADARG;
5977 		if (dmaxfer->version != DHD_DMAXFER_VERSION)
5978 			return BCME_VERSION;
5979 		if (dmaxfer->length != sizeof(dma_xfer_info_t)) {
5980 			return BCME_BADLEN;
5981 		}
5982 		bcmerror = dhdmsgbuf_dmaxfer_status(bus->dhd, dmaxfer);
5983 		break;
5984 	}
5985 
5986 	case IOV_GVAL(IOV_PCIE_SUSPEND):
5987 		int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
5988 		bcopy(&int_val, arg, val_size);
5989 		break;
5990 
5991 	case IOV_SVAL(IOV_PCIE_SUSPEND):
5992 		if (bool_val) { /* Suspend */
5993 			int ret;
5994 			unsigned long flags;
5995 
5996 			/*
5997 			 * If some other context is busy, wait until they are done,
5998 			 * before starting suspend
5999 			 */
6000 			ret = dhd_os_busbusy_wait_condition(bus->dhd,
6001 				&bus->dhd->dhd_bus_busy_state, DHD_BUS_BUSY_IN_DHD_IOVAR);
6002 			if (ret == 0) {
6003 				DHD_ERROR(("%s: Wait timed out, dhd_bus_busy_state = 0x%x\n",
6004 					__FUNCTION__, bus->dhd->dhd_bus_busy_state));
6005 				return BCME_BUSY;
6006 			}
6007 
6008 			DHD_GENERAL_LOCK(bus->dhd, flags);
6009 			DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
6010 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
6011 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6012 			dhdpcie_bus_suspend(bus, TRUE, TRUE);
6013 #else
6014 			dhdpcie_bus_suspend(bus, TRUE);
6015 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
6016 
6017 			DHD_GENERAL_LOCK(bus->dhd, flags);
6018 			DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
6019 			dhd_os_busbusy_wake(bus->dhd);
6020 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
6021 		} else { /* Resume */
6022 			unsigned long flags;
6023 			DHD_GENERAL_LOCK(bus->dhd, flags);
6024 			DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
6025 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
6026 
6027 			dhdpcie_bus_suspend(bus, FALSE);
6028 
6029 			DHD_GENERAL_LOCK(bus->dhd, flags);
6030 			DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
6031 			dhd_os_busbusy_wake(bus->dhd);
6032 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
6033 		}
6034 		break;
6035 
6036 	case IOV_GVAL(IOV_MEMSIZE):
6037 		int_val = (int32)bus->ramsize;
6038 		bcopy(&int_val, arg, val_size);
6039 		break;
6040 #ifdef DHD_BUS_MEM_ACCESS
6041 	case IOV_SVAL(IOV_MEMBYTES):
6042 	case IOV_GVAL(IOV_MEMBYTES):
6043 	{
6044 		uint32 address;		/* absolute backplane address */
6045 		uint size, dsize;
6046 		uint8 *data;
6047 
6048 		bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
6049 
6050 		ASSERT(plen >= 2*sizeof(int));
6051 
6052 		address = (uint32)int_val;
6053 		bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
6054 		size = (uint)int_val;
6055 
6056 		/* Do some validation */
6057 		dsize = set ? plen - (2 * sizeof(int)) : len;
6058 		if (dsize < size) {
6059 			DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
6060 			           __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
6061 			bcmerror = BCME_BADARG;
6062 			break;
6063 		}
6064 
6065 		DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x dsize %d\n",
6066 		          __FUNCTION__, (set ? "write" : "read"), size, address, dsize));
6067 
6068 		/* check if CR4 */
6069 		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
6070 		    si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
6071 			/* if the write starts at the RAM base, save the reset instruction to be written at address 0 */
6072 			if (set && address == bus->dongle_ram_base) {
6073 				bus->resetinstr = *(((uint32*)params) + 2);
6074 			}
6075 		} else {
6076 			/* If we know about SOCRAM, check for a fit */
6077 			if ((bus->orig_ramsize) &&
6078 			    ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize)))
6079 			{
6080 				uint8 enable, protect, remap;
6081 				si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
6082 				if (!enable || protect) {
6083 					DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n",
6084 						__FUNCTION__, bus->orig_ramsize, size, address));
6085 					DHD_ERROR(("%s: socram enable %d, protect %d\n",
6086 						__FUNCTION__, enable, protect));
6087 					bcmerror = BCME_BADARG;
6088 					break;
6089 				}
6090 
6091 				if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) {
6092 					uint32 devramsize = si_socdevram_size(bus->sih);
6093 					if ((address < SOCDEVRAM_ARM_ADDR) ||
6094 						(address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) {
6095 						DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n",
6096 							__FUNCTION__, address, size));
6097 						DHD_ERROR(("%s: socram range 0x%08x, size 0x%08x\n",
6098 							__FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize));
6099 						bcmerror = BCME_BADARG;
6100 						break;
6101 					}
6102 					/* adjust so that the address is the real backplane address */
6103 					address -= SOCDEVRAM_ARM_ADDR;
6104 					address += SOCDEVRAM_BP_ADDR;
6105 					DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n",
6106 						__FUNCTION__, (set ? "write" : "read"), size, address));
6107 				} else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) {
6108 					/* Cannot access the remap region while the devram remap
6109 					 * bit is set; ROM content would be returned in that case.
6110 					 */
6111 					DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n",
6112 						__FUNCTION__, address));
6113 					bcmerror = BCME_ERROR;
6114 					break;
6115 				}
6116 			}
6117 		}
6118 
6119 		/* Generate the actual data pointer */
6120 		data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
6121 
6122 		/* Call to do the transfer */
6123 		bcmerror = dhdpcie_bus_membytes(bus, set, address, data, size);
6124 
6125 		break;
6126 	}
6127 #endif /* DHD_BUS_MEM_ACCESS */
6128 
6129 	/* Debug related. Dumps core registers or one of the dongle memory */
6130 	case IOV_GVAL(IOV_DUMP_DONGLE):
6131 	{
6132 		dump_dongle_in_t ddi = *(dump_dongle_in_t*)params;
6133 		dump_dongle_out_t *ddo = (dump_dongle_out_t*)arg;
6134 		uint32 *p = ddo->val;
6135 		const uint max_offset = 4096 - 1; /* one core contains max 4096/4 registers */
6136 
6137 		if (plen < sizeof(ddi) || len < sizeof(ddo)) {
6138 			bcmerror = BCME_BADARG;
6139 			break;
6140 		}
6141 
6142 		switch (ddi.type) {
6143 		case DUMP_DONGLE_COREREG:
6144 			ddo->n_bytes = 0;
6145 
6146 			if (si_setcoreidx(bus->sih, ddi.index) == NULL) {
6147 				break; // beyond last core: core enumeration ended
6148 			}
6149 
6150 			ddo->address = si_addrspace(bus->sih, CORE_SLAVE_PORT_0, CORE_BASE_ADDR_0);
6151 			ddo->address += ddi.offset; // BP address at which this dump starts
6152 
6153 			ddo->id = si_coreid(bus->sih);
6154 			ddo->rev = si_corerev(bus->sih);
6155 
6156 			while (ddi.offset < max_offset &&
6157 				sizeof(dump_dongle_out_t) + ddo->n_bytes < (uint)len) {
6158 				*p++ = si_corereg(bus->sih, ddi.index, ddi.offset, 0, 0);
6159 				ddi.offset += sizeof(uint32);
6160 				ddo->n_bytes += sizeof(uint32);
6161 			}
6162 			break;
6163 		default:
6164 			// TODO: implement d11 SHM/TPL dumping
6165 			bcmerror = BCME_BADARG;
6166 			break;
6167 		}
6168 		break;
6169 	}
6170 
6171 	/* Debug related. Returns a string with dongle capabilities */
6172 	case IOV_GVAL(IOV_DNGL_CAPS):
6173 	{
6174 		strncpy(arg, bus->dhd->fw_capabilities,
6175 			MIN(strlen(bus->dhd->fw_capabilities), (size_t)len));
6176 		((char*)arg)[len - 1] = '\0';
6177 		break;
6178 	}
6179 
6180 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
6181 	case IOV_SVAL(IOV_GDB_SERVER):
6182 		/* debugger_*() functions may sleep, so cannot hold spinlock */
6183 		DHD_PERIM_UNLOCK(bus->dhd);
6184 		if (int_val > 0) {
6185 			debugger_init((void *) bus, &bus_ops, int_val, SI_ENUM_BASE(bus->sih));
6186 		} else {
6187 			debugger_close();
6188 		}
6189 		DHD_PERIM_LOCK(bus->dhd);
6190 		break;
6191 #endif /* DEBUGGER || DHD_DSCOPE */
6192 
6193 #ifdef BCM_BUZZZ
6194 	/* Dump dongle side buzzz trace to console */
6195 	case IOV_GVAL(IOV_BUZZZ_DUMP):
6196 		bcmerror = dhd_buzzz_dump_dngl(bus);
6197 		break;
6198 #endif /* BCM_BUZZZ */
6199 
6200 	case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
6201 		bcmerror = dhdpcie_bus_download_state(bus, bool_val);
6202 		break;
6203 
6204 	case IOV_GVAL(IOV_RAMSIZE):
6205 		int_val = (int32)bus->ramsize;
6206 		bcopy(&int_val, arg, val_size);
6207 		break;
6208 
6209 	case IOV_SVAL(IOV_RAMSIZE):
6210 		bus->ramsize = int_val;
6211 		bus->orig_ramsize = int_val;
6212 		break;
6213 
6214 	case IOV_GVAL(IOV_RAMSTART):
6215 		int_val = (int32)bus->dongle_ram_base;
6216 		bcopy(&int_val, arg, val_size);
6217 		break;
6218 
6219 	case IOV_GVAL(IOV_CC_NVMSHADOW):
6220 	{
6221 		struct bcmstrbuf dump_b;
6222 
6223 		bcm_binit(&dump_b, arg, len);
6224 		bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);
6225 		break;
6226 	}
6227 
6228 	case IOV_GVAL(IOV_SLEEP_ALLOWED):
6229 		bool_val = bus->sleep_allowed;
6230 		bcopy(&bool_val, arg, val_size);
6231 		break;
6232 
6233 	case IOV_SVAL(IOV_SLEEP_ALLOWED):
6234 		bus->sleep_allowed = bool_val;
6235 		break;
6236 
6237 	case IOV_GVAL(IOV_DONGLEISOLATION):
6238 		int_val = bus->dhd->dongle_isolation;
6239 		bcopy(&int_val, arg, val_size);
6240 		break;
6241 
6242 	case IOV_SVAL(IOV_DONGLEISOLATION):
6243 		bus->dhd->dongle_isolation = bool_val;
6244 		break;
6245 
6246 	case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
6247 		int_val = bus->ltrsleep_on_unload;
6248 		bcopy(&int_val, arg, val_size);
6249 		break;
6250 
6251 	case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
6252 		bus->ltrsleep_on_unload = bool_val;
6253 		break;
6254 
6255 	case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK):
6256 	{
6257 		struct bcmstrbuf dump_b;
6258 		bcm_binit(&dump_b, arg, len);
6259 		bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);
6260 		break;
6261 	}
6262 	case IOV_GVAL(IOV_DMA_RINGINDICES):
6263 	{
6264 		int_val = dhdpcie_get_dma_ring_indices(bus->dhd);
6265 		bcopy(&int_val, arg, sizeof(int_val));
6266 		break;
6267 	}
6268 	case IOV_SVAL(IOV_DMA_RINGINDICES):
6269 		bcmerror = dhdpcie_set_dma_ring_indices(bus->dhd, int_val);
6270 		break;
6271 
6272 	case IOV_GVAL(IOV_METADATA_DBG):
6273 		int_val = dhd_prot_metadata_dbg_get(bus->dhd);
6274 		bcopy(&int_val, arg, val_size);
6275 		break;
6276 	case IOV_SVAL(IOV_METADATA_DBG):
6277 		dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0));
6278 		break;
6279 
6280 	case IOV_GVAL(IOV_RX_METADATALEN):
6281 		int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
6282 		bcopy(&int_val, arg, val_size);
6283 		break;
6284 
6285 	case IOV_SVAL(IOV_RX_METADATALEN):
6286 		if (int_val > 64) {
6287 			bcmerror = BCME_BUFTOOLONG;
6288 			break;
6289 		}
6290 		dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);
6291 		break;
6292 
6293 	case IOV_SVAL(IOV_TXP_THRESHOLD):
6294 		dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);
6295 		break;
6296 
6297 	case IOV_GVAL(IOV_TXP_THRESHOLD):
6298 		int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
6299 		bcopy(&int_val, arg, val_size);
6300 		break;
6301 
6302 	case IOV_SVAL(IOV_DB1_FOR_MB):
6303 		if (int_val)
6304 			bus->db1_for_mb = TRUE;
6305 		else
6306 			bus->db1_for_mb = FALSE;
6307 		break;
6308 
6309 	case IOV_GVAL(IOV_DB1_FOR_MB):
6310 		if (bus->db1_for_mb)
6311 			int_val = 1;
6312 		else
6313 			int_val = 0;
6314 		bcopy(&int_val, arg, val_size);
6315 		break;
6316 
6317 	case IOV_GVAL(IOV_TX_METADATALEN):
6318 		int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
6319 		bcopy(&int_val, arg, val_size);
6320 		break;
6321 
6322 	case IOV_SVAL(IOV_TX_METADATALEN):
6323 		if (int_val > 64) {
6324 			bcmerror = BCME_BUFTOOLONG;
6325 			break;
6326 		}
6327 		dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
6328 		break;
6329 
6330 	case IOV_SVAL(IOV_DEVRESET):
6331 		switch (int_val) {
6332 			case DHD_BUS_DEVRESET_ON:
6333 				bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
6334 				break;
6335 			case DHD_BUS_DEVRESET_OFF:
6336 				bcmerror = dhd_bus_devreset(bus->dhd, (uint8)int_val);
6337 				break;
6338 			case DHD_BUS_DEVRESET_FLR:
6339 				bcmerror = dhd_bus_perform_flr(bus, bus->flr_force_fail);
6340 				break;
6341 			case DHD_BUS_DEVRESET_FLR_FORCE_FAIL:
6342 				bus->flr_force_fail = TRUE;
6343 				break;
6344 			default:
6345 				DHD_ERROR(("%s: invalid argument for devreset\n", __FUNCTION__));
6346 				break;
6347 		}
6348 		break;
6349 	case IOV_SVAL(IOV_FORCE_FW_TRAP):
6350 		if (bus->dhd->busstate == DHD_BUS_DATA)
6351 			dhdpcie_fw_trap(bus);
6352 		else {
6353 			DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__));
6354 			bcmerror = BCME_NOTUP;
6355 		}
6356 		break;
6357 	case IOV_GVAL(IOV_FLOW_PRIO_MAP):
6358 		int_val = bus->dhd->flow_prio_map_type;
6359 		bcopy(&int_val, arg, val_size);
6360 		break;
6361 
6362 	case IOV_SVAL(IOV_FLOW_PRIO_MAP):
6363 		int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
6364 		bcopy(&int_val, arg, val_size);
6365 		break;
6366 
6367 #ifdef DHD_PCIE_RUNTIMEPM
6368 	case IOV_GVAL(IOV_IDLETIME):
6369 		if (!(bus->dhd->op_mode & DHD_FLAG_MFG_MODE)) {
6370 			int_val = bus->idletime;
6371 		} else {
6372 			int_val = 0;
6373 		}
6374 		bcopy(&int_val, arg, val_size);
6375 		break;
6376 
6377 	case IOV_SVAL(IOV_IDLETIME):
6378 		if (int_val < 0) {
6379 			bcmerror = BCME_BADARG;
6380 		} else {
6381 			bus->idletime = int_val;
6382 			if (bus->idletime) {
6383 				DHD_ENABLE_RUNTIME_PM(bus->dhd);
6384 			} else {
6385 				DHD_DISABLE_RUNTIME_PM(bus->dhd);
6386 			}
6387 		}
6388 		break;
6389 #endif /* DHD_PCIE_RUNTIMEPM */
6390 
6391 	case IOV_GVAL(IOV_TXBOUND):
6392 		int_val = (int32)dhd_txbound;
6393 		bcopy(&int_val, arg, val_size);
6394 		break;
6395 
6396 	case IOV_SVAL(IOV_TXBOUND):
6397 		dhd_txbound = (uint)int_val;
6398 		break;
6399 
6400 	case IOV_SVAL(IOV_H2D_MAILBOXDATA):
6401 		dhdpcie_send_mb_data(bus, (uint)int_val);
6402 		break;
6403 
6404 	case IOV_SVAL(IOV_INFORINGS):
6405 		dhd_prot_init_info_rings(bus->dhd);
6406 		break;
6407 
6408 	case IOV_SVAL(IOV_H2D_PHASE):
6409 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
6410 			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
6411 				__FUNCTION__));
6412 			bcmerror = BCME_NOTDOWN;
6413 			break;
6414 		}
6415 		if (int_val)
6416 			bus->dhd->h2d_phase_supported = TRUE;
6417 		else
6418 			bus->dhd->h2d_phase_supported = FALSE;
6419 		break;
6420 
6421 	case IOV_GVAL(IOV_H2D_PHASE):
6422 		int_val = (int32) bus->dhd->h2d_phase_supported;
6423 		bcopy(&int_val, arg, val_size);
6424 		break;
6425 
6426 	case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
6427 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
6428 			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
6429 				__FUNCTION__));
6430 			bcmerror = BCME_NOTDOWN;
6431 			break;
6432 		}
6433 		if (int_val)
6434 			bus->dhd->force_dongletrap_on_bad_h2d_phase = TRUE;
6435 		else
6436 			bus->dhd->force_dongletrap_on_bad_h2d_phase = FALSE;
6437 		break;
6438 
6439 	case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
6440 		int_val = (int32) bus->dhd->force_dongletrap_on_bad_h2d_phase;
6441 		bcopy(&int_val, arg, val_size);
6442 		break;
6443 
6444 	case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM):
6445 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
6446 			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
6447 				__FUNCTION__));
6448 			bcmerror = BCME_NOTDOWN;
6449 			break;
6450 		}
6451 		dhd_prot_set_h2d_max_txpost(bus->dhd, (uint16)int_val);
6452 		break;
6453 
6454 	case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM):
6455 		int_val = dhd_prot_get_h2d_max_txpost(bus->dhd);
6456 		bcopy(&int_val, arg, val_size);
6457 		break;
6458 
6459 	case IOV_GVAL(IOV_RXBOUND):
6460 		int_val = (int32)dhd_rxbound;
6461 		bcopy(&int_val, arg, val_size);
6462 		break;
6463 
6464 	case IOV_SVAL(IOV_RXBOUND):
6465 		dhd_rxbound = (uint)int_val;
6466 		break;
6467 
6468 	case IOV_GVAL(IOV_TRAPDATA):
6469 	{
6470 		struct bcmstrbuf dump_b;
6471 		bcm_binit(&dump_b, arg, len);
6472 		bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, FALSE);
6473 		break;
6474 	}
6475 
6476 	case IOV_GVAL(IOV_TRAPDATA_RAW):
6477 	{
6478 		struct bcmstrbuf dump_b;
6479 		bcm_binit(&dump_b, arg, len);
6480 		bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, TRUE);
6481 		break;
6482 	}
6483 #ifdef DHD_PCIE_REG_ACCESS
6484 	case IOV_GVAL(IOV_PCIEASPM): {
6485 		uint8 clkreq = 0;
6486 		uint32 aspm = 0;
6487 
6488 		/* this command is to hide the details, but match the lcreg
6489 		#define PCIE_CLKREQ_ENAB		0x100
6490 		#define PCIE_ASPM_L1_ENAB        	2
6491 		#define PCIE_ASPM_L0s_ENAB       	1
6492 		*/
6493 
6494 		clkreq = dhdpcie_clkreq(bus->dhd->osh, 0, 0);
6495 		aspm = dhdpcie_lcreg(bus->dhd->osh, 0, 0);
6496 
6497 		int_val = ((clkreq & 0x1) << 8) | (aspm & PCIE_ASPM_ENAB);
6498 		bcopy(&int_val, arg, val_size);
6499 		break;
6500 	}
6501 
6502 	case IOV_SVAL(IOV_PCIEASPM): {
6503 		uint32 tmp;
6504 
6505 		tmp = dhdpcie_lcreg(bus->dhd->osh, 0, 0);
6506 		dhdpcie_lcreg(bus->dhd->osh, PCIE_ASPM_ENAB,
6507 			(tmp & ~PCIE_ASPM_ENAB) | (int_val & PCIE_ASPM_ENAB));
6508 
6509 		dhdpcie_clkreq(bus->dhd->osh, 1, ((int_val & 0x100) >> 8));
6510 		break;
6511 	}
6512 #endif /* DHD_PCIE_REG_ACCESS */
6513 	case IOV_SVAL(IOV_HANGREPORT):
6514 		bus->dhd->hang_report = bool_val;
6515 		DHD_ERROR(("%s: Set hang_report as %d\n",
6516 			__FUNCTION__, bus->dhd->hang_report));
6517 		break;
6518 
6519 	case IOV_GVAL(IOV_HANGREPORT):
6520 		int_val = (int32)bus->dhd->hang_report;
6521 		bcopy(&int_val, arg, val_size);
6522 		break;
6523 
6524 	case IOV_SVAL(IOV_CTO_PREVENTION):
6525 		bcmerror = dhdpcie_cto_init(bus, bool_val);
6526 		break;
6527 
6528 	case IOV_GVAL(IOV_CTO_PREVENTION):
6529 		if (bus->sih->buscorerev < 19) {
6530 			bcmerror = BCME_UNSUPPORTED;
6531 			break;
6532 		}
6533 		int_val = (int32)bus->cto_enable;
6534 		bcopy(&int_val, arg, val_size);
6535 		break;
6536 
6537 	case IOV_SVAL(IOV_CTO_THRESHOLD):
6538 		{
6539 			if (bus->sih->buscorerev < 19) {
6540 				bcmerror = BCME_UNSUPPORTED;
6541 				break;
6542 			}
6543 			bus->cto_threshold = (uint32)int_val;
6544 		}
6545 		break;
6546 
6547 	case IOV_GVAL(IOV_CTO_THRESHOLD):
6548 		if (bus->sih->buscorerev < 19) {
6549 			bcmerror = BCME_UNSUPPORTED;
6550 			break;
6551 		}
6552 		if (bus->cto_threshold)
6553 			int_val = (int32)bus->cto_threshold;
6554 		else
6555 			int_val = (int32)PCIE_CTO_TO_THRESH_DEFAULT;
6556 
6557 		bcopy(&int_val, arg, val_size);
6558 		break;
6559 
6560 	case IOV_SVAL(IOV_PCIE_WD_RESET):
6561 		if (bool_val) {
6562 			/* Legacy chipcommon watchdog reset */
6563 			dhdpcie_cc_watchdog_reset(bus);
6564 		}
6565 		break;
6566 
6567 	case IOV_GVAL(IOV_HWA_ENAB_BMAP):
6568 		int_val = bus->hwa_enab_bmap;
6569 		bcopy(&int_val, arg, val_size);
6570 		break;
6571 	case IOV_SVAL(IOV_HWA_ENAB_BMAP):
6572 		bus->hwa_enab_bmap = (uint8)int_val;
6573 		break;
6574 	case IOV_GVAL(IOV_IDMA_ENABLE):
6575 		int_val = bus->idma_enabled;
6576 		bcopy(&int_val, arg, val_size);
6577 		break;
6578 	case IOV_SVAL(IOV_IDMA_ENABLE):
6579 		bus->idma_enabled = (bool)int_val;
6580 		break;
6581 	case IOV_GVAL(IOV_IFRM_ENABLE):
6582 		int_val = bus->ifrm_enabled;
6583 		bcopy(&int_val, arg, val_size);
6584 		break;
6585 	case IOV_SVAL(IOV_IFRM_ENABLE):
6586 		bus->ifrm_enabled = (bool)int_val;
6587 		break;
6588 	case IOV_GVAL(IOV_CLEAR_RING):
6589 		bcopy(&int_val, arg, val_size);
6590 		dhd_flow_rings_flush(bus->dhd, 0);
6591 		break;
6592 	case IOV_GVAL(IOV_DAR_ENABLE):
6593 		int_val = bus->dar_enabled;
6594 		bcopy(&int_val, arg, val_size);
6595 		break;
6596 	case IOV_SVAL(IOV_DAR_ENABLE):
6597 		bus->dar_enabled = (bool)int_val;
6598 		break;
6599 	case IOV_GVAL(IOV_HSCBSIZE):
6600 		bcmerror = dhd_get_hscb_info(bus->dhd, NULL, (uint32 *)arg);
6601 		break;
6602 #ifdef DHD_BUS_MEM_ACCESS
6603 	case IOV_GVAL(IOV_HSCBBYTES):
6604 		bcmerror = dhd_get_hscb_buff(bus->dhd, int_val, int_val2, (void*)arg);
6605 		break;
6606 #endif /* DHD_BUS_MEM_ACCESS */
6607 
6608 #ifdef DHD_HP2P
6609 	case IOV_SVAL(IOV_HP2P_ENABLE):
6610 		dhd_prot_hp2p_enable(bus->dhd, TRUE, int_val);
6611 		break;
6612 
6613 	case IOV_GVAL(IOV_HP2P_ENABLE):
6614 		int_val = dhd_prot_hp2p_enable(bus->dhd, FALSE, int_val);
6615 		bcopy(&int_val, arg, val_size);
6616 		break;
6617 
6618 	case IOV_SVAL(IOV_HP2P_PKT_THRESHOLD):
6619 		dhd_prot_pkt_threshold(bus->dhd, TRUE, int_val);
6620 		break;
6621 
6622 	case IOV_GVAL(IOV_HP2P_PKT_THRESHOLD):
6623 		int_val = dhd_prot_pkt_threshold(bus->dhd, FALSE, int_val);
6624 		bcopy(&int_val, arg, val_size);
6625 		break;
6626 
6627 	case IOV_SVAL(IOV_HP2P_TIME_THRESHOLD):
6628 		dhd_prot_time_threshold(bus->dhd, TRUE, int_val);
6629 		break;
6630 
6631 	case IOV_GVAL(IOV_HP2P_TIME_THRESHOLD):
6632 		int_val = dhd_prot_time_threshold(bus->dhd, FALSE, int_val);
6633 		bcopy(&int_val, arg, val_size);
6634 		break;
6635 
6636 	case IOV_SVAL(IOV_HP2P_PKT_EXPIRY):
6637 		dhd_prot_pkt_expiry(bus->dhd, TRUE, int_val);
6638 		break;
6639 
6640 	case IOV_GVAL(IOV_HP2P_PKT_EXPIRY):
6641 		int_val = dhd_prot_pkt_expiry(bus->dhd, FALSE, int_val);
6642 		bcopy(&int_val, arg, val_size);
6643 		break;
6644 	case IOV_SVAL(IOV_HP2P_TXCPL_MAXITEMS):
6645 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
6646 			return BCME_NOTDOWN;
6647 		}
6648 		dhd_bus_set_hp2p_ring_max_size(bus, TRUE, int_val);
6649 		break;
6650 
6651 	case IOV_GVAL(IOV_HP2P_TXCPL_MAXITEMS):
6652 		int_val = dhd_bus_get_hp2p_ring_max_size(bus, TRUE);
6653 		bcopy(&int_val, arg, val_size);
6654 		break;
6655 	case IOV_SVAL(IOV_HP2P_RXCPL_MAXITEMS):
6656 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
6657 			return BCME_NOTDOWN;
6658 		}
6659 		dhd_bus_set_hp2p_ring_max_size(bus, FALSE, int_val);
6660 		break;
6661 
6662 	case IOV_GVAL(IOV_HP2P_RXCPL_MAXITEMS):
6663 		int_val = dhd_bus_get_hp2p_ring_max_size(bus, FALSE);
6664 		bcopy(&int_val, arg, val_size);
6665 		break;
6666 #endif /* DHD_HP2P */
6667 	case IOV_SVAL(IOV_EXTDTXS_IN_TXCPL):
6668 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
6669 			return BCME_NOTDOWN;
6670 		}
6671 		if (int_val)
6672 			bus->dhd->extdtxs_in_txcpl = TRUE;
6673 		else
6674 			bus->dhd->extdtxs_in_txcpl = FALSE;
6675 		break;
6676 
6677 	case IOV_GVAL(IOV_EXTDTXS_IN_TXCPL):
6678 		int_val = bus->dhd->extdtxs_in_txcpl;
6679 		bcopy(&int_val, arg, val_size);
6680 		break;
6681 
6682 	case IOV_SVAL(IOV_HOSTRDY_AFTER_INIT):
6683 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
6684 			return BCME_NOTDOWN;
6685 		}
6686 		if (int_val)
6687 			bus->dhd->hostrdy_after_init = TRUE;
6688 		else
6689 			bus->dhd->hostrdy_after_init = FALSE;
6690 		break;
6691 
6692 	case IOV_GVAL(IOV_HOSTRDY_AFTER_INIT):
6693 		int_val = bus->dhd->hostrdy_after_init;
6694 		bcopy(&int_val, arg, val_size);
6695 		break;
6696 
6697 	default:
6698 		bcmerror = BCME_UNSUPPORTED;
6699 		break;
6700 	}
6701 
6702 exit:
6703 	return bcmerror;
6704 } /* dhdpcie_bus_doiovar */
6705 
6706 /** Transfers bytes from host to dongle using pio mode */
6707 static int
6708 dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len)
6709 {
6710 	if (bus->dhd == NULL) {
6711 		DHD_ERROR(("bus not inited\n"));
6712 		return 0;
6713 	}
6714 	if (bus->dhd->prot == NULL) {
6715 		DHD_ERROR(("prot is not inited\n"));
6716 		return 0;
6717 	}
6718 	if (bus->dhd->busstate != DHD_BUS_DATA) {
6719 		DHD_ERROR(("not in a ready state for LPBK\n"));
6720 		return 0;
6721 	}
6722 	dhdmsgbuf_lpbk_req(bus->dhd, len);
6723 	return 0;
6724 }
6725 
6726 void
6727 dhd_bus_dump_dar_registers(struct dhd_bus *bus)
6728 {
6729 	uint32 dar_clk_ctrl_val, dar_pwr_ctrl_val, dar_intstat_val,
6730 		dar_errlog_val, dar_erraddr_val, dar_pcie_mbint_val;
6731 	uint32 dar_clk_ctrl_reg, dar_pwr_ctrl_reg, dar_intstat_reg,
6732 		dar_errlog_reg, dar_erraddr_reg, dar_pcie_mbint_reg;
6733 
6734 	if (bus->is_linkdown && !bus->cto_triggered) {
6735 		DHD_ERROR(("%s: link is down\n", __FUNCTION__));
6736 		return;
6737 	}
6738 
6739 	dar_clk_ctrl_reg = (uint32)DAR_CLK_CTRL(bus->sih->buscorerev);
6740 	dar_pwr_ctrl_reg = (uint32)DAR_PCIE_PWR_CTRL(bus->sih->buscorerev);
6741 	dar_intstat_reg = (uint32)DAR_INTSTAT(bus->sih->buscorerev);
6742 	dar_errlog_reg = (uint32)DAR_ERRLOG(bus->sih->buscorerev);
6743 	dar_erraddr_reg = (uint32)DAR_ERRADDR(bus->sih->buscorerev);
6744 	dar_pcie_mbint_reg = (uint32)DAR_PCIMailBoxInt(bus->sih->buscorerev);
6745 
6746 	if (bus->sih->buscorerev < 24) {
6747 		DHD_ERROR(("%s: DAR not supported for corerev(%d) < 24\n",
6748 			__FUNCTION__, bus->sih->buscorerev));
6749 		return;
6750 	}
6751 
6752 	dar_clk_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_clk_ctrl_reg, 0, 0);
6753 	dar_pwr_ctrl_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pwr_ctrl_reg, 0, 0);
6754 	dar_intstat_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_intstat_reg, 0, 0);
6755 	dar_errlog_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_errlog_reg, 0, 0);
6756 	dar_erraddr_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_erraddr_reg, 0, 0);
6757 	dar_pcie_mbint_val = si_corereg(bus->sih, bus->sih->buscoreidx, dar_pcie_mbint_reg, 0, 0);
6758 
6759 	DHD_ERROR(("%s: dar_clk_ctrl(0x%x:0x%x) dar_pwr_ctrl(0x%x:0x%x) dar_intstat(0x%x:0x%x)\n",
6760 		__FUNCTION__, dar_clk_ctrl_reg, dar_clk_ctrl_val,
6761 		dar_pwr_ctrl_reg, dar_pwr_ctrl_val, dar_intstat_reg, dar_intstat_val));
6762 
6763 	DHD_ERROR(("%s: dar_errlog(0x%x:0x%x) dar_erraddr(0x%x:0x%x) dar_pcie_mbint(0x%x:0x%x)\n",
6764 		__FUNCTION__, dar_errlog_reg, dar_errlog_val,
6765 		dar_erraddr_reg, dar_erraddr_val, dar_pcie_mbint_reg, dar_pcie_mbint_val));
6766 }
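
/*
 * Minimal sketch of the corerev-gated DAR read pattern used above, for a
 * single register (the helper name is hypothetical; it relies only on
 * DAR_CLK_CTRL() and si_corereg() as already used in this function).
 */
#if 0
static uint32
dhd_bus_dar_clk_ctrl_read(struct dhd_bus *bus)
{
	if (bus->sih->buscorerev < 24) {
		return 0;	/* DAR block not present on older PCIe cores */
	}
	/* mask == 0, val == 0 turns si_corereg() into a plain read */
	return si_corereg(bus->sih, bus->sih->buscoreidx,
		(uint32)DAR_CLK_CTRL(bus->sih->buscorerev), 0, 0);
}
#endif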
6767 
6768 /* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */
6769 void
6770 dhd_bus_hostready(struct dhd_bus *bus)
6771 {
6772 	if (!bus->dhd->d2h_hostrdy_supported) {
6773 		return;
6774 	}
6775 
6776 	if (bus->is_linkdown) {
6777 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
6778 		return;
6779 	}
6780 
6781 	DHD_ERROR(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
6782 		dhd_pcie_config_read(bus->osh, PCI_CFG_CMD, sizeof(uint32))));
6783 
6784 	if (DAR_PWRREQ(bus)) {
6785 		dhd_bus_pcie_pwr_req(bus);
6786 	}
6787 
6788 	dhd_bus_dump_dar_registers(bus);
6789 
6790 	si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus), ~0, 0x12345678);
6791 	bus->hostready_count ++;
6792 	DHD_ERROR(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
6793 }
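
/*
 * Illustrative call order on the resume path (mirrors the code in
 * dhdpcie_bus_suspend() below): an optional D0 inform over the mailbox,
 * then the hostready doorbell rung by the function above.
 */
#if 0
if (bus->use_d0_inform) {
	DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
	dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
	DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
}
dhd_bus_hostready(bus);		/* rings DB1 with the 0x12345678 marker */
#endif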
6794 
6795 /* Clear INTSTATUS */
6796 void
6797 dhdpcie_bus_clear_intstatus(struct dhd_bus *bus)
6798 {
6799 	uint32 intstatus = 0;
6800 	if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
6801 		(bus->sih->buscorerev == 2)) {
6802 		intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
6803 		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
6804 	} else {
6805 		/* this is a PCIE core register..not a config register... */
6806 		intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, 0, 0);
6807 		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int, bus->def_intmask,
6808 			intstatus);
6809 	}
6810 }
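
/*
 * Illustrative only (variables as in dhdpcie_bus_suspend() below): the
 * DHD_RECOVER_TIMEOUT path re-polls the mailbox when a D3 ACK seems lost,
 * then clears anything still latched so a stale INTSTATUS cannot re-fire
 * once the bus is suspended.
 */
#if 0
dhdpcie_handle_mb_data(bus);
timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
dhdpcie_bus_clear_intstatus(bus);	/* drop any interrupt left pending */
#endif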
6811 
6812 int
6813 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6814 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state, bool byint)
6815 #else
6816 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
6817 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
6818 {
6819 	int timeleft;
6820 	int rc = 0;
6821 	unsigned long flags, flags_bus;
6822 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6823 	int d3_read_retry = 0;
6824 	uint32 d2h_mb_data = 0;
6825 	uint32 zero = 0;
6826 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
6827 
6828 	if (bus->dhd == NULL) {
6829 		DHD_ERROR(("bus not inited\n"));
6830 		return BCME_ERROR;
6831 	}
6832 	if (bus->dhd->prot == NULL) {
6833 		DHD_ERROR(("prot is not inited\n"));
6834 		return BCME_ERROR;
6835 	}
6836 
6837 	if (dhd_query_bus_erros(bus->dhd)) {
6838 		return BCME_ERROR;
6839 	}
6840 
6841 	DHD_GENERAL_LOCK(bus->dhd, flags);
6842 	if (!(bus->dhd->busstate == DHD_BUS_DATA || bus->dhd->busstate == DHD_BUS_SUSPEND)) {
6843 		DHD_ERROR(("not in a readystate\n"));
6844 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
6845 		return BCME_ERROR;
6846 	}
6847 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
6848 	if (bus->dhd->dongle_reset) {
6849 		DHD_ERROR(("Dongle is in reset state.\n"));
6850 		return -EIO;
6851 	}
6852 
6853 	/* Check whether we are already in the requested state.
6854 	 * state=TRUE means Suspend
6855 	 * state=FALSE means Resume
6856 	 */
6857 	if (state == TRUE && bus->dhd->busstate == DHD_BUS_SUSPEND) {
6858 		DHD_ERROR(("Bus is already in SUSPEND state.\n"));
6859 		return BCME_OK;
6860 	} else if (state == FALSE && bus->dhd->busstate == DHD_BUS_DATA) {
6861 		DHD_ERROR(("Bus is already in RESUME state.\n"));
6862 		return BCME_OK;
6863 	}
6864 
6865 	if (state) {
6866 #ifdef OEM_ANDROID
6867 		int idle_retry = 0;
6868 		int active;
6869 #endif /* OEM_ANDROID */
6870 
6871 		if (bus->is_linkdown) {
6872 			DHD_ERROR(("%s: PCIe link was down, state=%d\n",
6873 				__FUNCTION__, state));
6874 			return BCME_ERROR;
6875 		}
6876 
6877 		/* Suspend */
6878 		DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__));
6879 
6880 		bus->dhd->dhd_watchdog_ms_backup = dhd_watchdog_ms;
6881 		if (bus->dhd->dhd_watchdog_ms_backup) {
6882 			DHD_ERROR(("%s: Disabling wdtick before going to suspend\n",
6883 				__FUNCTION__));
6884 			dhd_os_wd_timer(bus->dhd, 0);
6885 		}
6886 
6887 		DHD_GENERAL_LOCK(bus->dhd, flags);
6888 		if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
6889 			DHD_ERROR(("Tx Request is not ended\n"));
6890 			bus->dhd->busstate = DHD_BUS_DATA;
6891 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
6892 			return -EBUSY;
6893 		}
6894 
6895 		bus->last_suspend_start_time = OSL_LOCALTIME_NS();
6896 
6897 		/* stop all interface network queue. */
6898 		dhd_bus_stop_queue(bus);
6899 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
6900 
6901 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6902 		if (byint) {
6903 			DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
6904 			/* Clear wait_for_d3_ack before sending D3_INFORM */
6905 			bus->wait_for_d3_ack = 0;
6906 			dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
6907 
6908 			timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
6909 			DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
6910 		} else {
6911 			/* Clear wait_for_d3_ack before sending D3_INFORM */
6912 			bus->wait_for_d3_ack = 0;
6913 			dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM | H2D_HOST_ACK_NOINT);
6914 			while (!bus->wait_for_d3_ack && d3_read_retry < MAX_D3_ACK_TIMEOUT) {
6915 				dhdpcie_handle_mb_data(bus);
6916 				usleep_range(1000, 1500);
6917 				d3_read_retry++;
6918 			}
6919 		}
6920 #else
6921 		DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
6922 		/* Clear wait_for_d3_ack before sending D3_INFORM */
6923 		bus->wait_for_d3_ack = 0;
6924 		/*
6925 		 * Send H2D_HOST_D3_INFORM to dongle and mark bus->bus_low_power_state
6926 		 * to DHD_BUS_D3_INFORM_SENT in dhd_prot_ring_write_complete_mbdata
6927 		 * inside atomic context, so that no more DBs will be
6928 		 * rung after sending D3_INFORM
6929 		 */
6930 		dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
6931 
6932 		/* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */
6933 
6934 		timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
6935 
6936 #ifdef DHD_RECOVER_TIMEOUT
6937 		if (bus->wait_for_d3_ack == 0) {
6938 			/* If wait_for_d3_ack was not updated because D2H MB was not received */
6939 			uint32 intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
6940 				bus->pcie_mailbox_int, 0, 0);
6941 			int host_irq_disabled = dhdpcie_irq_disabled(bus);
6942 			if ((intstatus) && (intstatus != (uint32)-1) &&
6943 				(timeleft == 0) && (!dhd_query_bus_erros(bus->dhd))) {
6944 				DHD_ERROR(("%s: D3 ACK trying again intstatus=%x"
6945 					" host_irq_disabled=%d\n",
6946 					__FUNCTION__, intstatus, host_irq_disabled));
6947 				dhd_pcie_intr_count_dump(bus->dhd);
6948 				dhd_print_tasklet_status(bus->dhd);
6949 				if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 &&
6950 					!bus->use_mailbox) {
6951 					dhd_prot_process_ctrlbuf(bus->dhd);
6952 				} else {
6953 					dhdpcie_handle_mb_data(bus);
6954 				}
6955 				timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
6956 				/* Clear Interrupts */
6957 				dhdpcie_bus_clear_intstatus(bus);
6958 			}
6959 		} /* bus->wait_for_d3_ack was 0 */
6960 #endif /* DHD_RECOVER_TIMEOUT */
6961 
6962 		DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
6963 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
6964 
6965 #ifdef OEM_ANDROID
6966 		/* To allow threads that got pre-empted to complete.
6967 		 */
6968 		while ((active = dhd_os_check_wakelock_all(bus->dhd)) &&
6969 			(idle_retry < MAX_WKLK_IDLE_CHECK)) {
6970 			OSL_SLEEP(1);
6971 			idle_retry++;
6972 		}
6973 #endif /* OEM_ANDROID */
6974 
6975 		if (bus->wait_for_d3_ack) {
6976 			DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__));
6977 			/* Got D3 Ack. Suspend the bus */
6978 #ifdef OEM_ANDROID
6979 			if (active) {
6980 				DHD_ERROR(("%s(): Suspend failed because of wakelock, "
6981 					"restoring Dongle to D0\n", __FUNCTION__));
6982 
6983 				if (bus->dhd->dhd_watchdog_ms_backup) {
6984 					DHD_ERROR(("%s: Enabling wdtick due to wakelock active\n",
6985 						__FUNCTION__));
6986 					dhd_os_wd_timer(bus->dhd,
6987 						bus->dhd->dhd_watchdog_ms_backup);
6988 				}
6989 
6990 				/*
6991 				 * Dongle still thinks that it has to be in D3 state until
6992 				 * it gets a D0 Inform, but we are backing off from suspend.
6993 				 * Ensure that Dongle is brought back to D0.
6994 				 *
6995 				 * Bringing the Dongle back from D3 Ack state to D0 state is a
6996 				 * 2-step process: a D0 Inform is sent as a MB interrupt to
6997 				 * bring it out of D3 Ack state, and the hostready doorbell
6998 				 * then completes the transition to D0.
6999 				 */
7000 
7001 				/* Clear wait_for_d3_ack to send D0_INFORM or host_ready */
7002 				bus->wait_for_d3_ack = 0;
7003 
7004 				DHD_BUS_LOCK(bus->bus_lock, flags_bus);
7005 				bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
7006 				/* Enable back the intmask which was cleared in DPC
7007 				 * after getting D3_ACK.
7008 				 */
7009 				bus->resume_intr_enable_count++;
7010 
7011 				/* For Linux, macOS etc. (other than NDIS) enable back the dongle
7012 				 * interrupts using intmask and host interrupts
7013 				 * which were disabled in the dhdpcie_bus_isr()->
7014 				 * dhd_bus_handle_d3_ack().
7015 				 */
7016 				/* Enable back interrupt using Intmask!! */
7017 				dhdpcie_bus_intr_enable(bus);
7018 				/* Enable back interrupt from Host side!! */
7019 				dhdpcie_enable_irq(bus);
7020 
7021 				DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
7022 
7023 				if (bus->use_d0_inform) {
7024 					DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
7025 					dhdpcie_send_mb_data(bus,
7026 						(H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM));
7027 					DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
7028 				}
7029 				/* ring doorbell 1 (hostready) */
7030 				dhd_bus_hostready(bus);
7031 
7032 				DHD_GENERAL_LOCK(bus->dhd, flags);
7033 				bus->dhd->busstate = DHD_BUS_DATA;
7034 				/* resume all interface network queue. */
7035 				dhd_bus_start_queue(bus);
7036 				DHD_GENERAL_UNLOCK(bus->dhd, flags);
7037 				rc = BCME_ERROR;
7038 			} else {
7039 				/* Actual Suspend after no wakelock */
7040 #endif /* OEM_ANDROID */
7041 				/* At this time bus->bus_low_power_state will be
7042 				 * made to DHD_BUS_D3_ACK_RECIEVED after receiving D3_ACK
7043 				 * in dhd_bus_handle_d3_ack()
7044 				 */
7045 				if (bus->use_d0_inform &&
7046 					(bus->api.fw_rev < PCIE_SHARED_VERSION_6)) {
7047 					DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
7048 					dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
7049 					DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
7050 				}
7051 
7052 #if defined(BCMPCIE_OOB_HOST_WAKE)
7053 				if (bus->dhd->dhd_induce_error == DHD_INDUCE_DROP_OOB_IRQ) {
7054 					DHD_ERROR(("%s: Inducing DROP OOB IRQ\n", __FUNCTION__));
7055 				} else {
7056 					dhdpcie_oob_intr_set(bus, TRUE);
7057 				}
7058 #endif /* BCMPCIE_OOB_HOST_WAKE */
7059 
7060 				DHD_GENERAL_LOCK(bus->dhd, flags);
7061 				/* The Host cannot process interrupts now so disable the same.
7062 				 * No need to disable the dongle INTR using intmask, as we are
7063 				 * already calling disabling INTRs from DPC context after
7064 				 * getting D3_ACK in dhd_bus_handle_d3_ack.
7065 				 * Code may not look symmetric between Suspend and
7066 				 * Resume paths but this is done to close down the timing window
7067 				 * between DPC and suspend context and bus->bus_low_power_state
7068 				 * will be set to DHD_BUS_D3_ACK_RECIEVED in DPC.
7069 				 */
7070 				bus->dhd->d3ackcnt_timeout = 0;
7071 				bus->dhd->busstate = DHD_BUS_SUSPEND;
7072 				DHD_GENERAL_UNLOCK(bus->dhd, flags);
7073 				dhdpcie_dump_resource(bus);
7074 				/* Handle Host Suspend */
7075 				rc = dhdpcie_pci_suspend_resume(bus, state);
7076 				if (!rc) {
7077 					bus->last_suspend_end_time = OSL_LOCALTIME_NS();
7078 				}
7079 #ifdef OEM_ANDROID
7080 			}
7081 #endif /* OEM_ANDROID */
7082 		} else if (timeleft == 0) { /* D3 ACK Timeout */
7083 #ifdef DHD_FW_COREDUMP
7084 			uint32 cur_memdump_mode = bus->dhd->memdump_enabled;
7085 #endif /* DHD_FW_COREDUMP */
7086 
7087 			/* check if the D3 ACK timeout due to scheduling issue */
7088 			bus->dhd->is_sched_error = !dhd_query_bus_erros(bus->dhd) &&
7089 				bus->isr_entry_time > bus->last_d3_inform_time &&
7090 				dhd_bus_query_dpc_sched_errors(bus->dhd);
7091 			bus->dhd->d3ack_timeout_occured = TRUE;
7092 			/* If the D3 Ack has timeout */
7093 			bus->dhd->d3ackcnt_timeout++;
7094 			DHD_ERROR(("%s: resumed on timeout for D3 ACK%s d3_inform_cnt %d\n",
7095 				__FUNCTION__, bus->dhd->is_sched_error ?
7096 				" due to scheduling problem" : "", bus->dhd->d3ackcnt_timeout));
7097 #if defined(DHD_KERNEL_SCHED_DEBUG) && defined(DHD_FW_COREDUMP)
7098 			if (bus->dhd->is_sched_error && cur_memdump_mode == DUMP_MEMFILE_BUGON) {
7099 				/* change g_assert_type to trigger Kernel panic */
7100 				g_assert_type = 2;
7101 				/* use ASSERT() to trigger panic */
7102 				ASSERT(0);
7103 			}
7104 #endif /* DHD_KERNEL_SCHED_DEBUG && DHD_FW_COREDUMP */
7105 			DHD_BUS_LOCK(bus->bus_lock, flags_bus);
7106 			bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
7107 			DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
7108 			DHD_GENERAL_LOCK(bus->dhd, flags);
7109 			bus->dhd->busstate = DHD_BUS_DATA;
7110 			/* resume all interface network queue. */
7111 			dhd_bus_start_queue(bus);
7112 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
7113 			if (!bus->dhd->dongle_trap_occured &&
7114 				!bus->is_linkdown &&
7115 				!bus->cto_triggered) {
7116 				uint32 intstatus = 0;
7117 
7118 				/* Check if PCIe bus status is valid */
7119 				intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
7120 					bus->pcie_mailbox_int, 0, 0);
7121 				if (intstatus == (uint32)-1) {
7122 					/* Invalidate PCIe bus status */
7123 					bus->is_linkdown = 1;
7124 				}
7125 
7126 				dhd_bus_dump_console_buffer(bus);
7127 				dhd_prot_debug_info_print(bus->dhd);
7128 #ifdef DHD_FW_COREDUMP
7129 				if (cur_memdump_mode) {
7130 					/* write core dump to file */
7131 					bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
7132 					dhdpcie_mem_dump(bus);
7133 				}
7134 #endif /* DHD_FW_COREDUMP */
7135 
7136 #ifdef OEM_ANDROID
7137 				DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n",
7138 					__FUNCTION__));
7139 #ifdef SUPPORT_LINKDOWN_RECOVERY
7140 #ifdef CONFIG_ARCH_MSM
7141 				bus->no_cfg_restore = 1;
7142 #endif /* CONFIG_ARCH_MSM */
7143 #endif /* SUPPORT_LINKDOWN_RECOVERY */
7144 				dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
7145 #endif /* OEM_ANDROID */
7146 			}
7147 #if defined(DHD_ERPOM)
7148 			dhd_schedule_reset(bus->dhd);
7149 #endif // endif
7150 			rc = -ETIMEDOUT;
7151 		}
7152 	} else {
7153 		/* Resume */
7154 		DHD_ERROR(("%s: Entering resume state\n", __FUNCTION__));
7155 		bus->last_resume_start_time = OSL_LOCALTIME_NS();
7156 
7157 		/**
7158 		 * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
7159 		 * si_backplane_access(function to read/write backplane)
7160 		 * updates the window(PCIE2_BAR0_CORE2_WIN) only if
7161 		 * window being accessed is different from the window
7162 		 * being pointed by second_bar0win.
7163 		 * Since PCIE2_BAR0_CORE2_WIN is already reset because of D3 cold,
7164 		 * invalidating second_bar0win after resume updates
7165 		 * PCIE2_BAR0_CORE2_WIN with right window.
7166 		 */
7167 		si_invalidate_second_bar0win(bus->sih);
7168 #if defined(OEM_ANDROID)
7169 #if defined(BCMPCIE_OOB_HOST_WAKE)
7170 		DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
7171 #endif /* BCMPCIE_OOB_HOST_WAKE */
7172 #endif /* OEM_ANDROID */
7173 		rc = dhdpcie_pci_suspend_resume(bus, state);
7174 		dhdpcie_dump_resource(bus);
7175 
7176 		DHD_BUS_LOCK(bus->bus_lock, flags_bus);
7177 		/* set bus_low_power_state to DHD_BUS_NO_LOW_POWER_STATE */
7178 		bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
7179 		DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
7180 
7181 		if (!rc && bus->dhd->busstate == DHD_BUS_SUSPEND) {
7182 			if (bus->use_d0_inform) {
7183 				DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
7184 				dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
7185 				DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
7186 			}
7187 			/* ring doorbell 1 (hostready) */
7188 			dhd_bus_hostready(bus);
7189 		}
7190 		DHD_GENERAL_LOCK(bus->dhd, flags);
7191 		bus->dhd->busstate = DHD_BUS_DATA;
7192 #ifdef DHD_PCIE_RUNTIMEPM
7193 		if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
7194 			bus->bus_wake = 1;
7195 			OSL_SMP_WMB();
7196 			wake_up_interruptible(&bus->rpm_queue);
7197 		}
7198 #endif /* DHD_PCIE_RUNTIMEPM */
7199 		/* resume all interface network queue. */
7200 		dhd_bus_start_queue(bus);
7201 
7202 		/* TODO: for NDIS also we need to use enable_irq in future */
7203 		bus->resume_intr_enable_count++;
7204 
7205 		/* For Linux, macOS etc. (other than NDIS) enable back the dongle interrupts
7206 		 * using intmask and host interrupts
7207 		 * which were disabled in the dhdpcie_bus_isr()->dhd_bus_handle_d3_ack().
7208 		 */
7209 		dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
7210 		dhdpcie_enable_irq(bus); /* Enable back interrupt from Host side!! */
7211 
7212 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
7213 
7214 		if (bus->dhd->dhd_watchdog_ms_backup) {
7215 			DHD_ERROR(("%s: Enabling wdtick after resume\n",
7216 				__FUNCTION__));
7217 			dhd_os_wd_timer(bus->dhd, bus->dhd->dhd_watchdog_ms_backup);
7218 		}
7219 
7220 		bus->last_resume_end_time = OSL_LOCALTIME_NS();
7221 		/* Update TCM rd index for EDL ring */
7222 		DHD_EDL_RING_TCM_RD_UPDATE(bus->dhd);
7223 	}
7224 	return rc;
7225 }
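
/*
 * Condensed, illustrative view of the suspend half above (wakelock,
 * OEM_ANDROID and recovery handling omitted); a sketch of the sequence,
 * not an alternative implementation:
 */
#if 0
dhd_bus_stop_queue(bus);			/* park all net queues */
bus->wait_for_d3_ack = 0;
dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);	/* ask the dongle for D3 */
timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
if (bus->wait_for_d3_ack) {
	bus->dhd->busstate = DHD_BUS_SUSPEND;
	rc = dhdpcie_pci_suspend_resume(bus, TRUE);	/* host-side suspend */
} else if (timeleft == 0) {
	/* timeout: restore DHD_BUS_DATA, dump console, optional memdump */
}
#endif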
7226 
7227 uint32
7228 dhdpcie_force_alp(struct dhd_bus *bus, bool enable)
7229 {
7230 	ASSERT(bus && bus->sih);
7231 	if (enable) {
7232 		si_corereg(bus->sih, bus->sih->buscoreidx,
7233 			OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, CCS_FORCEALP);
7234 	} else {
7235 		si_corereg(bus->sih, bus->sih->buscoreidx,
7236 			OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, 0);
7237 	}
7238 	return 0;
7239 }
7240 
7241 /* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */
7242 uint32
7243 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int l1_entry_time)
7244 {
7245 	uint reg_val;
7246 
7247 	ASSERT(bus && bus->sih);
7248 
7249 	si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
7250 		0x1004);
7251 	reg_val = si_corereg(bus->sih, bus->sih->buscoreidx,
7252 		OFFSETOF(sbpcieregs_t, configdata), 0, 0);
7253 	reg_val = (reg_val & ~(0x7f << 16)) | ((l1_entry_time & 0x7f) << 16);
7254 	si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
7255 		reg_val);
7256 
7257 	return 0;
7258 }
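
/*
 * The configaddr/configdata pair used above is the generic indirection for
 * touching the dongle's PCIe config space over the backplane. A minimal
 * sketch of the same pattern as a reusable helper (the helper name is
 * hypothetical; the register accesses match the code above):
 */
#if 0
static uint32
dhdpcie_cfg_indirect_rmw(struct dhd_bus *bus, uint32 cfg_offset,
	uint32 mask, uint32 val)
{
	uint32 reg_val;

	/* select which config word the configdata register exposes */
	si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, configaddr), ~0, cfg_offset);
	/* read it (mask 0 == plain read) */
	reg_val = si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, configdata), 0, 0);
	/* modify and write back */
	reg_val = (reg_val & ~mask) | (val & mask);
	si_corereg(bus->sih, bus->sih->buscoreidx,
		OFFSETOF(sbpcieregs_t, configdata), ~0, reg_val);
	return reg_val;
}
#endif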
7259 
7260 static uint32
7261 dhd_apply_d11_war_length(struct dhd_bus *bus, uint32 len, uint32 d11_lpbk)
7262 {
7263 	uint16 chipid = si_chipid(bus->sih);
7264 	if ((chipid == BCM4375_CHIP_ID ||
7265 		chipid == BCM4362_CHIP_ID ||
7266 		chipid == BCM43751_CHIP_ID ||
7267 		chipid == BCM4377_CHIP_ID) &&
7268 		(d11_lpbk != M2M_DMA_LPBK && d11_lpbk != M2M_NON_DMA_LPBK)) {
7269 			len += 8;
7270 	}
7271 	DHD_ERROR(("%s: len %d\n", __FUNCTION__, len));
7272 	return len;
7273 }
7274 
7275 /** Transfers bytes from host to dongle and to host again using DMA */
7276 static int
7277 dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
7278 		uint32 len, uint32 srcdelay, uint32 destdelay,
7279 		uint32 d11_lpbk, uint32 core_num, uint32 wait)
7280 {
7281 	int ret = 0;
7282 
7283 	if (bus->dhd == NULL) {
7284 		DHD_ERROR(("bus not inited\n"));
7285 		return BCME_ERROR;
7286 	}
7287 	if (bus->dhd->prot == NULL) {
7288 		DHD_ERROR(("prot is not inited\n"));
7289 		return BCME_ERROR;
7290 	}
7291 	if (bus->dhd->busstate != DHD_BUS_DATA) {
7292 		DHD_ERROR(("not in a ready state for LPBK\n"));
7293 		return BCME_ERROR;
7294 	}
7295 
7296 	if (len < 5 || len > 4194296) {
7297 		DHD_ERROR(("len is too small or too large\n"));
7298 		return BCME_ERROR;
7299 	}
7300 
7301 	len = dhd_apply_d11_war_length(bus, len, d11_lpbk);
7302 
7303 	bus->dmaxfer_complete = FALSE;
7304 	ret = dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay,
7305 		d11_lpbk, core_num);
7306 	if (ret != BCME_OK || !wait) {
7307 		DHD_INFO(("%s: dmaxfer req returns status %u; wait = %u\n", __FUNCTION__,
7308 				ret, wait));
7309 	} else {
7310 		ret = dhd_os_dmaxfer_wait(bus->dhd, &bus->dmaxfer_complete);
7311 		if (ret < 0)
7312 			ret = BCME_NOTREADY;
7313 	}
7314 
7315 	return ret;
7316 
7317 }
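
/*
 * Illustrative caller (hypothetical): a blocking 64KB M2M DMA loopback
 * smoke test with no source/destination delay on core 0, using only the
 * function above and constants already referenced in this file.
 */
#if 0
int err = dhdpcie_bus_dmaxfer_req(bus, 65536, 0, 0, M2M_DMA_LPBK, 0, TRUE);
if (err != BCME_OK) {
	DHD_ERROR(("dmaxfer loopback failed: %d\n", err));
}
#endif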
7318 
7319 bool
7320 dhd_bus_is_multibp_capable(struct dhd_bus *bus)
7321 {
7322 	return MULTIBP_CAP(bus->sih);
7323 }
7324 
7325 #define PCIE_REV_FOR_4378A0	66	/* dhd_bus_perform_flr_with_quiesce() causes problems */
7326 #define PCIE_REV_FOR_4378B0	68
7327 
7328 static int
7329 dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
7330 {
7331 	int bcmerror = 0;
7332 	volatile uint32 *cr4_regs;
7333 	bool do_flr = TRUE;	/* TRUE skips the pwr-req balancing if we fail before it is computed */
7334 	hs_addrs_t bl_hs_addrs = {NULL, NULL};
7335 
7336 	if (!bus->sih) {
7337 		DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));
7338 		return BCME_ERROR;
7339 	}
7340 
7341 	if (bus->sih->chip == CYW55560_CHIP_ID) {
7342 		/* Host bootloader handshake TCM/REGS addresses init */
7343 		bcmerror = dhdpcie_dongle_host_get_handshake_address(bus->sih, bus->osh,
7344 			&bl_hs_addrs);
7345 		if (bcmerror) {
7346 			DHD_ERROR(("%s: REGS/TCM addresses not initialized\n", __FUNCTION__));
7347 			goto fail;
7348 		}
7349 	}
7350 
7351 	do_flr = ((bus->sih->buscorerev != PCIE_REV_FOR_4378A0) &&
7352 			(bus->sih->buscorerev != PCIE_REV_FOR_4378B0));
7353 
7354 	if (MULTIBP_ENAB(bus->sih) && !do_flr) {
7355 		dhd_bus_pcie_pwr_req(bus);
7356 	}
7357 
7358 	/* To enter download state, disable ARM and reset SOCRAM.
7359 	 * To exit download state, simply reset ARM (default is RAM boot).
7360 	 */
7361 	if (enter) {
7362 #ifndef BCMQT	/* for performance reasons, skip the FLR for QT */
7363 #endif /* !BCMQT */
7364 
7365 		/* Make sure BAR1 maps to backplane address 0 */
7366 		dhdpcie_setbar1win(bus, 0x00000000);
7367 		bus->alp_only = TRUE;
7368 
7369 		/* some chips (e.g. 43602) have two ARM cores; the CR4 is the one that receives the firmware. */
7370 		cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
7371 
7372 		if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
7373 		    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) &&
7374 		    !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
7375 			DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
7376 			bcmerror = BCME_ERROR;
7377 			goto fail;
7378 		}
7379 
7380 		if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
7381 			/* Halt ARM & remove reset */
7382 			si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
7383 			if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) {
7384 				DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__));
7385 				bcmerror = BCME_ERROR;
7386 				goto fail;
7387 			}
7388 			si_core_reset(bus->sih, 0, 0);
7389 			/* reset the last 4 bytes of RAM; they are used for the shared area */
7390 			dhdpcie_init_shared_addr(bus);
7391 		} else if (cr4_regs == NULL) { /* no CR4 present on chip */
7392 			si_core_disable(bus->sih, 0);
7393 
7394 			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
7395 				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
7396 				bcmerror = BCME_ERROR;
7397 				goto fail;
7398 			}
7399 
7400 			si_core_reset(bus->sih, 0, 0);
7401 
7402 			/* Clear the top bit of memory */
7403 			if (bus->ramsize) {
7404 				uint32 zeros = 0;
7405 				if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4,
7406 				                     (uint8*)&zeros, 4) < 0) {
7407 					bcmerror = BCME_ERROR;
7408 					goto fail;
7409 				}
7410 			}
7411 		} else {
7412 			/* For CR4,
7413 			 * Halt ARM
7414 			 * Remove ARM reset
7415 			 * Read RAM base address [0x18_0000]
7416 			 * [next] Download firmware
7417 			 * [done at else] Populate the reset vector
7418 			 * [done at else] Remove ARM halt
7419 			*/
7420 
7421 			if (bus->sih->chip == CYW55560_CHIP_ID) {
7422 
7423 				/* Skip ARM halt and reset in case of 55560 */
7424 
7425 				/* Bootloader host pre handshake function */
7426 				if ((bcmerror = dhdpcie_dongle_host_pre_handshake(bus->sih,
7427 					bus->osh, &bl_hs_addrs))) {
7428 					DHD_ERROR(("%s: error %d dongle host pre handshake\n",
7429 						__FUNCTION__, bcmerror));
7430 					goto fail;
7431 				}
7432 				DHD_ERROR(("%s: dongle host pre handshake successful, dl FW\n",
7433 					__FUNCTION__));
7434 
7435 				/* Read PCIE shared structure here */
7436 				/* This is necessary for console buffer initialization */
7437 				if ((bcmerror = dhdpcie_readshared_console(bus)) < 0) {
7438 					DHD_ERROR(("%s: Shared region not initialized\n",
7439 						__FUNCTION__));
7440 				}
7441 
7442 				/* Console buffer read - First pass */
7443 				if ((bcmerror = dhdpcie_bus_readconsole(bus)) < 0) {
7444 					DHD_ERROR(("%s: First pass console buffer read failed\n",
7445 						__FUNCTION__));
7446 				}
7447 			} else {
7448 				/* Halt ARM & remove reset */
7449 				si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
7450 				if (BCM43602_CHIP(bus->sih->chip)) {
7451 					W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX,
7452 						5);
7453 					W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA,
7454 						0);
7455 					W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX,
7456 						7);
7457 					W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA,
7458 						0);
7459 				}
7460 				/* reset the last 4 bytes of RAM; they are used for the shared area */
7461 				dhdpcie_init_shared_addr(bus);
7462 			}
7463 		}
7464 	} else {
7465 		if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
7466 			/* write vars */
7467 			if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
7468 				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
7469 				goto fail;
7470 			}
7471 			/* write random numbers to sysmem for the purpose of
7472 			 * randomizing heap address space.
7473 			 */
7474 			if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
7475 				DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
7476 					__FUNCTION__));
7477 				goto fail;
7478 			}
7479 			/* switch back to arm core again */
7480 			if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
7481 				DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__));
7482 				bcmerror = BCME_ERROR;
7483 				goto fail;
7484 			}
7485 			/* write address 0 with reset instruction */
7486 			bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
7487 				(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
7488 			/* now remove reset and halt and continue to run CA7 */
7489 		} else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
7490 			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
7491 				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
7492 				bcmerror = BCME_ERROR;
7493 				goto fail;
7494 			}
7495 
7496 			if (!si_iscoreup(bus->sih)) {
7497 				DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
7498 				bcmerror = BCME_ERROR;
7499 				goto fail;
7500 			}
7501 
7502 			/* Enable remap before ARM reset but after vars.
7503 			 * No backplane access in remap mode
7504 			 */
7505 			if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
7506 			    !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
7507 				DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
7508 				bcmerror = BCME_ERROR;
7509 				goto fail;
7510 			}
7511 
7512 			if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
7513 			    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
7514 				DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
7515 				bcmerror = BCME_ERROR;
7516 				goto fail;
7517 			}
7518 		} else {
7519 			if (BCM43602_CHIP(bus->sih->chip)) {
7520 				/* Firmware crashes on SOCSRAM access when core is in reset */
7521 				if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
7522 					DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
7523 						__FUNCTION__));
7524 					bcmerror = BCME_ERROR;
7525 					goto fail;
7526 				}
7527 				si_core_reset(bus->sih, 0, 0);
7528 				si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
7529 			}
7530 
7531 			if (bus->sih->chip == CYW55560_CHIP_ID) {
7532 				/* Console buffer read - Second pass */
7533 				if ((bcmerror = dhdpcie_bus_readconsole(bus)) < 0) {
7534 					DHD_ERROR(("%s: Second pass console buffer read failed\n",
7535 						__FUNCTION__));
7536 				}
7537 
7538 				/* FW and NVRAM download done notification to bootloader */
7539 				if ((bcmerror = dhdpcie_dongle_host_post_handshake(bus->sih,
7540 					bus->osh, &bl_hs_addrs))) {
7541 					DHD_ERROR(("%s: error %d dongle host post handshake\n",
7542 						__FUNCTION__, bcmerror));
7543 					goto fail;
7544 				}
7545 				DHD_ERROR(("%s: FW download successful\n", __FUNCTION__));
7546 
7547 				/*
7548 				 * Check signature validation function
7549 				 * D2H_VALDN_DONE bit will be set in the following cases:
7550 				 * 1. Open mode: when a signature is not sent
7551 				 * 2. Secure mode: when a valid signature is sent
7552 				 * Write vars and nvram download only if the D2H_VALDN_DONE
7553 				 * bit has been set
7554 				 */
7555 
7556 				if ((bcmerror = dhdpcie_dongle_host_chk_validation(bus->sih,
7557 					bus->osh, &bl_hs_addrs))) {
7558 					DHD_ERROR(("%s: error %d dongle host validation\n",
7559 						__FUNCTION__, bcmerror));
7560 					goto fail;
7561 				}
7562 			}
7563 
7564 			/* write vars */
7565 			if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
7566 				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
7567 				goto fail;
7568 			}
7569 
7570 			/* write a random number to TCM for the purpose of
7571 			 * randomizing heap address space.
7572 			 */
7573 			if ((bcmerror = dhdpcie_wrt_rnd(bus)) != BCME_OK) {
7574 				DHD_ERROR(("%s: Failed to get random seed to write to TCM !\n",
7575 					__FUNCTION__));
7576 				goto fail;
7577 			}
7578 
7579 			/* switch back to arm core again */
7580 			if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
7581 				DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
7582 				bcmerror = BCME_ERROR;
7583 				goto fail;
7584 			}
7585 
7586 			/* write address 0 with reset instruction */
7587 			if (bus->sih->chip != CYW55560_CHIP_ID) {
7588 				bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
7589 					(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
7590 
7591 				if (bcmerror == BCME_OK) {
7592 					uint32 tmp;
7593 
7594 					bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
7595 						(uint8 *)&tmp, sizeof(tmp));
7596 
7597 					if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
7598 						DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
7599 							__FUNCTION__, bus->resetinstr));
7600 						DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
7601 							__FUNCTION__, tmp));
7602 						bcmerror = BCME_ERROR;
7603 						goto fail;
7604 					}
7605 				}
7606 			}
7607 
7608 			/* now remove reset and halt and continue to run CR4 */
7609 		}
7610 
7611 		if (bus->sih->chip == CYW55560_CHIP_ID) {
7612 			/* Console buffer read - Final pass */
7613 			if ((bcmerror = dhdpcie_bus_readconsole(bus)) < 0) {
7614 				DHD_ERROR(("%s: Final pass console buffer read failed\n",
7615 					__FUNCTION__));
7616 			}
7617 
7618 			/* Set write_vars done bit to let BL jump to mainline FW */
7619 			if ((bcmerror = dhdpcie_dongle_host_post_varswrite(bus, &bl_hs_addrs))) {
7620 				DHD_ERROR(("%s: error %d dongle_host_post_varswrite\n",
7621 					__FUNCTION__, bcmerror));
7622 				goto fail;
7623 			}
7624 			DHD_ERROR(("%s VARS done bit set, BL can jump to mainline FW\n",
7625 				__FUNCTION__));
7626 		} else {
7627 			si_core_reset(bus->sih, 0, 0);
7628 		}
7629 		/* Allow HT Clock now that the ARM is running. */
7630 		bus->alp_only = FALSE;
7631 
7632 		bus->dhd->busstate = DHD_BUS_LOAD;
7633 	}
7634 
7635 fail:
7636 
7637 	if (bcmerror) {
7638 		if (bus->sih->chip == CYW55560_CHIP_ID) {
7639 			/* Read the shared structure to determine console address */
7640 			if (dhdpcie_readshared_console(bus) < 0) {
7641 				DHD_ERROR(("%s: Shared region not initialized\n",
7642 					__FUNCTION__));
7643 			} else {
7644 				/* Console buffer read */
7645 				if (dhdpcie_bus_readconsole(bus) < 0) {
7646 					DHD_ERROR(("%s: Failure case console buffer read failed\n",
7647 						__FUNCTION__));
7648 				}
7649 			}
7650 		}
7651 	}
7652 
7653 	/* Always return to PCIE core */
7654 	si_setcore(bus->sih, PCIE2_CORE_ID, 0);
7655 
7656 	if (MULTIBP_ENAB(bus->sih) && !do_flr) {
7657 		dhd_bus_pcie_pwr_req_clear(bus);
7658 	}
7659 
7660 	return bcmerror;
7661 } /* dhdpcie_bus_download_state */
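
/*
 * Illustrative pairing of the two halves of the function above
 * (hypothetical caller; fw_image/fw_len stand in for the real
 * file/embedded image paths handled elsewhere in this driver):
 */
#if 0
uint8 *fw_image;	/* hypothetical: firmware image in host memory */
uint fw_len;		/* hypothetical: image length in bytes */
int bcmerror;

bcmerror = dhdpcie_bus_download_state(bus, TRUE);	/* enter download state */
if (!bcmerror)
	bcmerror = dhdpcie_bus_membytes(bus, TRUE, bus->dongle_ram_base,
		fw_image, fw_len);			/* write the image */
if (!bcmerror)
	bcmerror = dhdpcie_bus_download_state(bus, FALSE);	/* run the ARM */
#endif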
7662 
7663 static int
7664 dhdpcie_dongle_host_get_handshake_address(si_t *sih, osl_t *osh, hs_addrs_t *addr)
7665 {
7666 	int bcmerror = BCME_OK;
7667 
7668 #ifndef HS_IN_TCM
7669 	sbpcieregs_t *pcieregs;
7670 
7671 	pcieregs = si_setcore(sih, PCIE2_CORE_ID, 0);
7672 	if (!pcieregs) {
7673 		return BCME_ERROR;
7674 	}
7675 	addr->d2h = &pcieregs->u1.dar_64.d2h_msg_reg0;
7676 	addr->h2d = &pcieregs->u1.dar_64.h2d_msg_reg0;
7677 #else /* HS_IN_TCM */
7678 	addr->d2h = (void *)HS_IN_TCM;
7679 	addr->h2d = (volatile uint32 *)addr->d2h + 1;
7680 #endif /* HS_IN_TCM */
7681 
7682 	return bcmerror;
7683 } /* dhdpcie_dongle_host_get_handshake_address */
7684 
7685 static int
7686 dhdpcie_handshake_msg_reg_write(si_t *sih, osl_t *osh, volatile void *addr, uint *buffer)
7687 {
7688 	int bcmerror = BCME_OK;
7689 
7690 #ifndef HS_IN_TCM
7691 	si_setcore(sih, PCIE2_CORE_ID, 0);
7692 	W_REG(osh, (volatile uint32 *)addr, *buffer);
7693 #else
7694 	bcmerror = si_backplane_access(sih, addr, 4, buffer, FALSE);
7695 #endif // endif
7696 	return bcmerror;
7697 } /* dhdpcie_handshake_msg_reg_write */
7698 
7699 static int
7700 dhdpcie_handshake_msg_reg_read(si_t *sih, osl_t *osh, volatile void *addr, uint *buffer)
7701 {
7702 	int bcmerror = BCME_OK;
7703 
7704 #ifndef HS_IN_TCM
7705 	si_setcore(sih, PCIE2_CORE_ID, 0);
7706 	*buffer = R_REG(osh, (volatile uint32 *)addr);
7707 #else
7708 	bcmerror = si_backplane_access(sih, addr, 4, buffer, TRUE);
7709 #endif // endif
7710 	return bcmerror;
7711 } /* dhdpcie_handshake_msg_reg_read */
7712 
7713 static int
7714 dhdpcie_dongle_host_handshake_spinwait(si_t *sih, osl_t *osh, volatile void *addr, uint32 bitshift,
7715 	uint32 us)
7716 {
7717 	uint32 countdown_;
7718 	uint32 read_addr = 0;
7719 	int bcmerror = BCME_OK;
7720 
7721 	for (countdown_ = (us) + (HS_POLL_PERIOD_US - 1U); countdown_ >= HS_POLL_PERIOD_US;
7722 		countdown_ -= HS_POLL_PERIOD_US) {
7723 
7724 		bcmerror = dhdpcie_handshake_msg_reg_read(sih, osh, addr, &read_addr);
7725 		if (bcmerror) {
7726 			bcmerror = BCME_ERROR;
7727 			break;
7728 		}
7729 
7730 		if (isset(&read_addr, bitshift)) {
7731 			bcmerror = BCME_OK;
7732 			break;
7733 		}
7734 
7735 		OSL_DELAY(HS_POLL_PERIOD_US);
7736 	}
7737 
7738 	if (countdown_ < HS_POLL_PERIOD_US) {
7739 		bcmerror = BCME_NOTREADY;
7740 	}
7741 
7742 	return bcmerror;
7743 } /* dhdpcie_dongle_host_handshake_spinwait */
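
/*
 * Illustrative use (sih/osh/addr as in the callers below): wait up to
 * D2H_READY_TIMEOUT_US for the bootloader to raise D2H_READY in the d2h
 * message register, as dhdpcie_dongle_host_pre_handshake() does next.
 */
#if 0
bcmerror = dhdpcie_dongle_host_handshake_spinwait(sih, osh, addr->d2h,
	D2H_READY_SHIFT, D2H_READY_TIMEOUT_US);
if (bcmerror == BCME_NOTREADY) {
	DHD_ERROR(("bootloader did not signal D2H_READY in time\n"));
}
#endif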
7744 
7745 static int
7746 dhdpcie_dongle_host_pre_handshake(si_t *sih, osl_t *osh, hs_addrs_t *addr)
7747 {
7748 	int bcmerror = BCME_OK;
7749 	uint h2d_reg = 0x00000000;
7750 
7751 	/* Host initialization for dongle to host handshake */
7752 	bcmerror = dhdpcie_handshake_msg_reg_write(sih, osh, addr->h2d, &h2d_reg);
7753 	if (bcmerror) {
7754 		goto err;
7755 	}
7756 
7757 	bcmerror = dhdpcie_dongle_host_handshake_spinwait(sih, osh, addr->d2h, D2H_READY_SHIFT,
7758 		D2H_READY_TIMEOUT_US);
7759 	if (!bcmerror) {
7760 
7761 		/* Set H2D_DL_START indication to dongle that Host shall start FW download */
7762 		h2d_reg = 0;
7763 		setbit(&h2d_reg, H2D_DL_START_SHIFT);
7764 		bcmerror = dhdpcie_handshake_msg_reg_write(sih, osh, addr->h2d, &h2d_reg);
7765 		if (bcmerror) {
7766 			goto err;
7767 		}
7768 	}
7769 
7770 err:
7771 	return bcmerror;
7772 } /* dhdpcie_dongle_host_pre_handshake */
7773 
7774 static int
7775 dhdpcie_dongle_host_post_handshake(si_t *sih, osl_t *osh, hs_addrs_t *addr)
7776 {
7777 	int bcmerror = BCME_OK;
7778 	uint h2d_reg = 0x00000000;
7779 
7780 	/* Reset download start */
7781 	clrbit(&h2d_reg, H2D_DL_START_SHIFT);
7782 
7783 	/* download done */
7784 	setbit(&h2d_reg, H2D_DL_DONE_SHIFT);
7785 	bcmerror = dhdpcie_handshake_msg_reg_write(sih, osh, addr->h2d, &h2d_reg);
7786 	if (bcmerror) {
7787 		goto err;
7788 	}
7789 
7790 	bcmerror = dhdpcie_dongle_host_handshake_spinwait(sih, osh, addr->d2h,
7791 		D2H_TRX_HDR_PARSE_DONE_SHIFT, D2H_TRX_HDR_PARSE_DONE_TIMEOUT_US);
7792 
7793 	if (bcmerror) {
7794 		/* Host notification to bootloader to get reset on error */
7795 		dhdpcie_handshake_msg_reg_read(sih, osh, addr->h2d, &h2d_reg);
7796 		setbit(&h2d_reg, H2D_BL_RESET_ON_ERROR_SHIFT);
7797 		dhdpcie_handshake_msg_reg_write(sih, osh, addr->h2d, &h2d_reg);
7798 	}
7799 
7800 err:
7801 	return bcmerror;
7802 } /* dhdpcie_dongle_host_post_handshake */
7803 
7804 static int
7805 dhdpcie_dongle_host_chk_validation(si_t *sih, osl_t *osh, hs_addrs_t *addr)
7806 {
7807 	int bcmerror = BCME_OK;
7808 	uint d2h_reg = 0x00000000;
7809 	uint h2d_reg = 0x00000000;
7810 
7811 	bcmerror = dhdpcie_dongle_host_handshake_spinwait(sih, osh, addr->d2h, D2H_VALDN_DONE_SHIFT,
7812 		D2H_VALDN_DONE_TIMEOUT_US);
7813 	if (!bcmerror) {
7814 
7815 		bcmerror = dhdpcie_handshake_msg_reg_read(sih, osh, addr->d2h, &d2h_reg);
7816 		if (!bcmerror) {
7817 
7818 			if (isset(&d2h_reg, D2H_VALDN_RESULT_SHIFT)) {
7819 				DHD_ERROR(("%s: TRX img validation check successful\n",
7820 				__FUNCTION__));
7821 			} else {
7822 				DHD_ERROR(("%s: TRX img validation check failed\n", __FUNCTION__));
7823 				bcmerror = BCME_ERROR;
7824 			}
7825 		}
7826 	}
7827 
7828 	if (bcmerror) {
7829 		/* Host notification to bootloader to get reset on error
7830 		 * To avoid the race condition between host and dongle
7831 		 */
7832 		dhdpcie_handshake_msg_reg_read(sih, osh, addr->h2d, &h2d_reg);
7833 		setbit(&h2d_reg, H2D_BL_RESET_ON_ERROR_SHIFT);
7834 		dhdpcie_handshake_msg_reg_write(sih, osh, addr->h2d, &h2d_reg);
7835 	}
7836 
7837 	return bcmerror;
7838 } /* dhdpcie_dongle_host_chk_validation */
7839 
7840 int
7841 dhdpcie_dongle_host_pre_wd_reset_sequence(si_t *sih, osl_t *osh)
7842 {
7843 	int32 bcmerror = BCME_ERROR;
7844 	sbpcieregs_t *pcieregs = NULL;
7845 	uint32 reg_val = 0;
7846 
7847 	if (sih && osh) {
7848 
7849 		pcieregs = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0);
7850 
7851 		/* Host initialization for dongle to host handshake */
7852 		bcmerror = dhdpcie_handshake_msg_reg_write(sih, osh,
7853 			&pcieregs->u1.dar_64.h2d_msg_reg0, &reg_val);
7854 	}
7855 
7856 	return bcmerror;
7857 } /* dhdpcie_dongle_host_pre_wd_reset_sequence */
7858 
7859 int
7860 dhdpcie_dongle_host_post_wd_reset_sequence(si_t *sih, osl_t *osh)
7861 {
7862 	int32 bcmerror = BCME_ERROR;
7863 	sbpcieregs_t *pcieregs = NULL;
7864 	uint32 reg_val = 0;
7865 	int32 idx = 0;
7866 	int print_interval = D2H_READY_WD_RESET_COUNT / 10;
7867 
7868 	if (sih && osh) {
7869 		pcieregs = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0);
7870 
7871 		/* Host initialization for dongle to host handshake */
7872 		bcmerror = dhdpcie_handshake_msg_reg_write(sih, osh,
7873 			&pcieregs->u1.dar_64.h2d_msg_reg0, &reg_val);
7874 
7875 		for (idx = D2H_READY_WD_RESET_COUNT; idx > 0; idx--) {
7876 
7877 #ifdef BCMQT
7878 			OSL_SLEEP(D2H_READY_WD_RESET_US/1000);
7879 #else
7880 			OSL_DELAY(D2H_READY_WD_RESET_US);
7881 #endif // endif
7882 			if (!(idx % print_interval)) {
7883 				DHD_ERROR(("Waiting %d us for D2H_READY\n",
7884 					idx * D2H_READY_WD_RESET_US));
7885 			}
7886 
7887 			dhdpcie_handshake_msg_reg_read(sih, osh, &pcieregs->u1.dar_64.d2h_msg_reg0,
7888 				&reg_val);
7889 			if (isset(&reg_val, D2H_READY_SHIFT)) {
7890 				break;
7891 			}
7892 		}
7893 
7894 		if (!idx) {
7895 			DHD_ERROR(("%s: error - Waiting for D2H_READY timeout %d\n",
7896 				__FUNCTION__, idx));
7897 		} else {
7898 			bcmerror = BCME_OK;
7899 		}
7900 	}
7901 
7902 	return bcmerror;
7903 } /* dhdpcie_dongle_host_post_wd_reset_sequence */
7904 
7905 /* Pre ChipId access sequence: ensure the H2D handshake register is cleared and
7906  * that the host has waited for the bootloader to be ready before chipid access.
7907  */
7908 int
7909 dhdpcie_dongle_host_pre_chipid_access_sequence(osl_t *osh, volatile void *regva)
7910 {
7911 	int32 bcmerror = BCME_ERROR;
7912 	sbpcieregs_t *pcieregs = NULL;
7913 	uint32 reg_val = 0;
7914 	int32 idx = 0;
7915 	int print_interval = D2H_READY_WD_RESET_COUNT / 10;
7916 
7917 	if (osh && regva) {
7918 
7919 		pcieregs = (sbpcieregs_t*)(regva);
7920 
7921 		/* Host init for D2H handshake */
7922 		W_REG(osh, &pcieregs->u1.dar_64.h2d_msg_reg0, reg_val);
7923 
7924 		/* Host waits for bootloader to be ready before ChipId access */
7925 		for (idx = D2H_READY_WD_RESET_COUNT; idx > 0; idx--) {
7926 
7927 #ifdef BCMQT
7928 			OSL_SLEEP(D2H_READY_WD_RESET_US/1000);
7929 #else
7930 			OSL_DELAY(D2H_READY_WD_RESET_US);
7931 #endif // endif
7932 			if (!(idx % print_interval)) {
7933 				DHD_ERROR(("Waiting %d us for D2H_READY\n",
7934 					idx * D2H_READY_WD_RESET_US));
7935 			}
7936 			reg_val = R_REG(osh, &pcieregs->u1.dar_64.d2h_msg_reg0);
7937 			if (isset(&reg_val, D2H_READY_SHIFT)) {
7938 				break;
7939 			}
7940 		}
7941 
7942 		if (!idx) {
7943 			DHD_ERROR(("%s: error - Waiting for D2H_READY timeout %d\n",
7944 				__FUNCTION__, idx));
7945 		} else {
7946 			bcmerror = BCME_OK;
7947 		}
7948 	}
7949 
7950 	return bcmerror;
7951 } /* dhdpcie_dongle_host_pre_chipid_access_sequence */
7952 
7953 static int
7954 dhdpcie_dongle_host_post_varswrite(dhd_bus_t *bus, hs_addrs_t *addr)
7955 {
7956 	int bcmerror = BCME_OK;
7957 	uint h2d_reg = 0x00000000;
7958 
7959 	/* Set NVRAM done bit (Download done is already set) */
7960 	setbit(&h2d_reg, H2D_DL_DONE_SHIFT);
7961 	setbit(&h2d_reg, H2D_DL_NVRAM_DONE_SHIFT);
7962 	bcmerror = dhdpcie_handshake_msg_reg_write(bus->sih, bus->osh, addr->h2d, &h2d_reg);
7963 
7964 	return bcmerror;
7965 } /* dhdpcie_dongle_host_post_varswrite */
7966 
7967 static int
7968 dhdpcie_bus_write_vars(dhd_bus_t *bus)
7969 {
7970 	int bcmerror = 0;
7971 	uint32 varsize, phys_size;
7972 	uint32 varaddr;
7973 	uint8 *vbuffer;
7974 	uint32 varsizew;
7975 #ifdef DHD_DEBUG
7976 	uint8 *nvram_ularray;
7977 #endif /* DHD_DEBUG */
7978 
7979 	/* Even if there are no vars are to be written, we still need to set the ramsize. */
7980 	/* Even if there are no vars to be written, we still need to set the ramsize. */
7981 	varaddr = (bus->ramsize - 4) - varsize;
7982 
7983 	varaddr += bus->dongle_ram_base;
7984 
7985 	if (bus->vars) {
7986 
7987 		vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
7988 		if (!vbuffer)
7989 			return BCME_NOMEM;
7990 
7991 		bzero(vbuffer, varsize);
7992 		bcopy(bus->vars, vbuffer, bus->varsz);
7993 		/* Write the vars list */
7994 		bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);
7995 
7996 		/* Implement read back and verify later */
7997 #ifdef DHD_DEBUG
7998 		/* Verify NVRAM bytes */
7999 		DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
8000 		nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
8001 		if (!nvram_ularray) {
8002 			MFREE(bus->dhd->osh, vbuffer, varsize);
8003 			return BCME_NOMEM;
8004 		}
8005 
8006 		/* Upload image to verify downloaded contents. */
8007 		memset(nvram_ularray, 0xaa, varsize);
8008 
8009 		/* Read the vars list to temp buffer for comparison */
8010 		bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
8011 		if (bcmerror) {
8012 			DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
8013 				__FUNCTION__, bcmerror, varsize, varaddr));
8014 		}
8015 
8016 		/* Compare the org NVRAM with the one read from RAM */
8017 		if (memcmp(vbuffer, nvram_ularray, varsize)) {
8018 			DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
8019 		} else
8020 			DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
8021 			__FUNCTION__));
8022 
8023 		MFREE(bus->dhd->osh, nvram_ularray, varsize);
8024 #endif /* DHD_DEBUG */
8025 
8026 		MFREE(bus->dhd->osh, vbuffer, varsize);
8027 	}
8028 
8029 	phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
8030 
8031 	phys_size += bus->dongle_ram_base;
8032 
8033 	/* adjust to the user specified RAM */
8034 	DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
8035 		phys_size, bus->ramsize));
8036 	DHD_INFO(("Vars are at %d, orig varsize is %d\n",
8037 		varaddr, varsize));
8038 	varsize = ((phys_size - 4) - varaddr);
8039 
8040 	/*
8041 	 * Determine the length token:
8042 	 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
8043 	 */
8044 	if (bcmerror) {
8045 		varsizew = 0;
8046 		bus->nvram_csm = varsizew;
8047 	} else {
8048 		varsizew = varsize / 4;
8049 		varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
8050 		bus->nvram_csm = varsizew;
8051 		varsizew = htol32(varsizew);
8052 	}
8053 
8054 	DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
8055 
8056 	/* Write the length token to the last word */
8057 	bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
8058 		(uint8*)&varsizew, 4);
8059 
8060 	return bcmerror;
8061 } /* dhdpcie_bus_write_vars */
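
/*
 * Worked example of the length token computed above (illustrative): for an
 * 8KB (0x2000-byte) vars region, varsizew = 0x2000 / 4 = 0x800 words, so
 * the token is
 *
 *   (~0x800 << 16) | (0x800 & 0x0000FFFF)
 *   = (0xFFFFF7FF << 16) | 0x0800
 *   = 0xF7FF0800
 *
 * i.e. the upper 16 bits are the one's complement of the lower 16, which
 * is the "checksum" the comment above refers to.
 */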
8062 
8063 int
8064 dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
8065 {
8066 	int bcmerror = BCME_OK;
8067 
8068 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
8069 
8070 	/* Basic sanity checks */
8071 	if (bus->dhd->up) {
8072 		bcmerror = BCME_NOTDOWN;
8073 		goto err;
8074 	}
8075 	if (!len) {
8076 		bcmerror = BCME_BUFTOOSHORT;
8077 		goto err;
8078 	}
8079 
8080 	/* Free the old ones and replace with passed variables */
8081 	if (bus->vars)
8082 		MFREE(bus->dhd->osh, bus->vars, bus->varsz);
8083 
8084 	bus->vars = MALLOC(bus->dhd->osh, len);
8085 	bus->varsz = bus->vars ? len : 0;
8086 	if (bus->vars == NULL) {
8087 		bcmerror = BCME_NOMEM;
8088 		goto err;
8089 	}
8090 
8091 	/* Copy the passed variables, which should include the terminating double-null */
8092 	bcopy(arg, bus->vars, bus->varsz);
8093 
8094 #ifdef DHD_USE_SINGLE_NVRAM_FILE
8095 	if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
8096 		char *sp = NULL;
8097 		char *ep = NULL;
8098 		int i;
8099 		char tag[2][8] = {"ccode=", "regrev="};
8100 
8101 		/* Find ccode and regrev info */
8102 		for (i = 0; i < 2; i++) {
8103 			sp = strnstr(bus->vars, tag[i], bus->varsz);
8104 			if (!sp) {
8105 				DHD_ERROR(("%s: Could not find %s info from the nvram %s\n",
8106 					__FUNCTION__, tag[i], bus->nv_path));
8107 				bcmerror = BCME_ERROR;
8108 				goto err;
8109 			}
8110 			sp = strchr(sp, '=');
8111 			ep = strchr(sp, '\0');
8112 			/* We assume that the string lengths of both the ccode and
8113 			 * regrev values do not exceed WLC_CNTRY_BUF_SZ
8114 			 */
8115 			if (ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) {
8116 				sp++;
8117 				while (*sp != '\0') {
8118 					DHD_INFO(("%s: parse '%s', current sp = '%c'\n",
8119 						__FUNCTION__, tag[i], *sp));
8120 					*sp++ = '0';
8121 				}
8122 			} else {
8123 				DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n",
8124 					__FUNCTION__, tag[i]));
8125 				bcmerror = BCME_ERROR;
8126 				goto err;
8127 			}
8128 		}
8129 	}
8130 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
8131 
8132 err:
8133 	return bcmerror;
8134 }
8135 
8136 /* loop through the capability list and see if the pcie capability exists */
8137 uint8
8138 dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id)
8139 {
8140 	uint8 cap_id;
8141 	uint8 cap_ptr = 0;
8142 	uint8 byte_val;
8143 
8144 	/* check for Header type 0 */
8145 	byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
8146 	if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) {
8147 		DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__));
8148 		goto end;
8149 	}
8150 
8151 	/* check if the capability pointer field exists */
8152 	byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
8153 	if (!(byte_val & PCI_CAPPTR_PRESENT)) {
8154 		DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__));
8155 		goto end;
8156 	}
8157 
8158 	cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
8159 	/* check if the capability pointer is 0x00 */
8160 	if (cap_ptr == 0x00) {
8161 		DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__));
8162 		goto end;
8163 	}
8164 
8165 	/* loop through the capability list and see if the pcie capability exists */
8166 
8167 	cap_id = read_pci_cfg_byte(cap_ptr);
8168 
8169 	while (cap_id != req_cap_id) {
8170 		cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
8171 		if (cap_ptr == 0x00) break;
8172 		cap_id = read_pci_cfg_byte(cap_ptr);
8173 	}
8174 
8175 end:
8176 	return cap_ptr;
8177 }
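
/*
 * Illustrative use of the capability walk above: locate the PCIe
 * capability and derive the Link Control offset from it, the same
 * derivation dhdpcie_lcreg()/dhdpcie_clkreq() perform below.
 */
#if 0
uint8 pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
if (pcie_cap) {
	uint8 lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
	uint32 lc = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
	DHD_INFO(("LinkCtrl @0x%x = 0x%x\n", lcreg_offset, lc));
}
#endif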
8178 
8179 void
8180 dhdpcie_pme_active(osl_t *osh, bool enable)
8181 {
8182 	uint8 cap_ptr;
8183 	uint32 pme_csr;
8184 
8185 	cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
8186 
8187 	if (!cap_ptr) {
8188 		DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
8189 		return;
8190 	}
8191 
8192 	pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32));
8193 	DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr));
8194 
8195 	pme_csr |= PME_CSR_PME_STAT;
8196 	if (enable) {
8197 		pme_csr |= PME_CSR_PME_EN;
8198 	} else {
8199 		pme_csr &= ~PME_CSR_PME_EN;
8200 	}
8201 
8202 	OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr);
8203 }
8204 
8205 bool
8206 dhdpcie_pme_cap(osl_t *osh)
8207 {
8208 	uint8 cap_ptr;
8209 	uint32 pme_cap;
8210 
8211 	cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
8212 
8213 	if (!cap_ptr) {
8214 		DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
8215 		return FALSE;
8216 	}
8217 
8218 	pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32));
8219 
8220 	DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap));
8221 
8222 	return ((pme_cap & PME_CAP_PM_STATES) != 0);
8223 }
8224 
8225 uint32
8226 dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val)
8227 {
8228 
8229 	uint8	pcie_cap;
8230 	uint8	lcreg_offset;	/* PCIE capability LCreg offset in the config space */
8231 	uint32	reg_val;
8232 
8233 	pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
8234 
8235 	if (!pcie_cap) {
8236 		DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
8237 		return 0;
8238 	}
8239 
8240 	lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
8241 
8242 	/* set operation */
8243 	if (mask) {
8244 		/* read */
8245 		reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
8246 
8247 		/* modify */
8248 		reg_val &= ~mask;
8249 		reg_val |= (mask & val);
8250 
8251 		/* write */
8252 		OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
8253 	}
8254 	return OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
8255 }
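
/*
 * Illustrative use of the read-modify-write mode above. In the standard
 * PCIe Link Control register, bits [1:0] are the ASPM control field
 * (bit 0 = L0s, bit 1 = L1), so masking them to zero disables ASPM.
 */
#if 0
uint32 lcreg = dhdpcie_lcreg(osh, 0x3, 0x0);	/* clear ASPM L0s/L1 enables */
DHD_INFO(("LinkCtrl after ASPM disable: 0x%x\n", lcreg));
#endif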
8256 
8257 uint8
8258 dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val)
8259 {
8260 	uint8	pcie_cap;
8261 	uint32	reg_val;
8262 	uint8	lcreg_offset;	/* PCIE capability LCreg offset in the config space */
8263 
8264 	pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
8265 
8266 	if (!pcie_cap) {
8267 		DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
8268 		return 0;
8269 	}
8270 
8271 	lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
8272 
8273 	reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
8274 	/* set operation */
8275 	if (mask) {
8276 		if (val)
8277 			reg_val |= PCIE_CLKREQ_ENAB;
8278 		else
8279 			reg_val &= ~PCIE_CLKREQ_ENAB;
8280 		OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
8281 		reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
8282 	}
8283 	if (reg_val & PCIE_CLKREQ_ENAB)
8284 		return 1;
8285 	else
8286 		return 0;
8287 }
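/*
 * dhdpcie_clkreq() uses the same mask/val convention on the CLKREQ# enable
 * bit of Link Control: a zero mask only queries the current state, a
 * non-zero mask writes it, and the return value reports the (possibly
 * updated) state:
 *
 *	uint8 clkreq_on = dhdpcie_clkreq(osh, 0, 0);	// query only
 *	(void)dhdpcie_clkreq(osh, 1, 1);		// enable CLKREQ#
 */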
8288 
8289 void dhd_dump_intr_counters(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
8290 {
8291 	dhd_bus_t *bus;
8292 	uint64 current_time = OSL_LOCALTIME_NS();
8293 
8294 	if (!dhd) {
8295 		DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
8296 		return;
8297 	}
8298 
8299 	bus = dhd->bus;
8300 	if (!bus) {
8301 		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
8302 		return;
8303 	}
8304 
8305 	bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n");
8306 	bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
8307 		"isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
8308 		"dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
8309 		bus->resume_intr_enable_count, bus->dpc_intr_enable_count,
8310 		bus->isr_intr_disable_count, bus->suspend_intr_disable_count,
8311 		bus->dpc_return_busdown_count, bus->non_ours_irq_count);
8312 #ifdef BCMPCIE_OOB_HOST_WAKE
8313 	bcm_bprintf(strbuf, "oob_intr_count=%lu oob_intr_enable_count=%lu"
8314 		" oob_intr_disable_count=%lu\noob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT
8315 		" last_oob_irq_enable_time="SEC_USEC_FMT"\nlast_oob_irq_disable_time="SEC_USEC_FMT
8316 		" oob_irq_enabled=%d oob_gpio_level=%d\n",
8317 		bus->oob_intr_count, bus->oob_intr_enable_count,
8318 		bus->oob_intr_disable_count, dhdpcie_get_oob_irq_num(bus),
8319 		GET_SEC_USEC(bus->last_oob_irq_time), GET_SEC_USEC(bus->last_oob_irq_enable_time),
8320 		GET_SEC_USEC(bus->last_oob_irq_disable_time), dhdpcie_get_oob_irq_status(bus),
8321 		dhdpcie_get_oob_irq_level());
8322 #endif /* BCMPCIE_OOB_HOST_WAKE */
8323 	bcm_bprintf(strbuf, "\ncurrent_time="SEC_USEC_FMT" isr_entry_time="SEC_USEC_FMT
8324 		" isr_exit_time="SEC_USEC_FMT"\ndpc_sched_time="SEC_USEC_FMT
8325 		" last_non_ours_irq_time="SEC_USEC_FMT" dpc_entry_time="SEC_USEC_FMT"\n"
8326 		"last_process_ctrlbuf_time="SEC_USEC_FMT " last_process_flowring_time="SEC_USEC_FMT
8327 		" last_process_txcpl_time="SEC_USEC_FMT"\nlast_process_rxcpl_time="SEC_USEC_FMT
8328 		" last_process_infocpl_time="SEC_USEC_FMT" last_process_edl_time="SEC_USEC_FMT
8329 		"\ndpc_exit_time="SEC_USEC_FMT" resched_dpc_time="SEC_USEC_FMT"\n"
8330 		"last_d3_inform_time="SEC_USEC_FMT"\n",
8331 		GET_SEC_USEC(current_time), GET_SEC_USEC(bus->isr_entry_time),
8332 		GET_SEC_USEC(bus->isr_exit_time), GET_SEC_USEC(bus->dpc_sched_time),
8333 		GET_SEC_USEC(bus->last_non_ours_irq_time), GET_SEC_USEC(bus->dpc_entry_time),
8334 		GET_SEC_USEC(bus->last_process_ctrlbuf_time),
8335 		GET_SEC_USEC(bus->last_process_flowring_time),
8336 		GET_SEC_USEC(bus->last_process_txcpl_time),
8337 		GET_SEC_USEC(bus->last_process_rxcpl_time),
8338 		GET_SEC_USEC(bus->last_process_infocpl_time),
8339 		GET_SEC_USEC(bus->last_process_edl_time),
8340 		GET_SEC_USEC(bus->dpc_exit_time), GET_SEC_USEC(bus->resched_dpc_time),
8341 		GET_SEC_USEC(bus->last_d3_inform_time));
8342 
8343 	bcm_bprintf(strbuf, "\nlast_suspend_start_time="SEC_USEC_FMT" last_suspend_end_time="
8344 		SEC_USEC_FMT" last_resume_start_time="SEC_USEC_FMT" last_resume_end_time="
8345 		SEC_USEC_FMT"\n", GET_SEC_USEC(bus->last_suspend_start_time),
8346 		GET_SEC_USEC(bus->last_suspend_end_time),
8347 		GET_SEC_USEC(bus->last_resume_start_time),
8348 		GET_SEC_USEC(bus->last_resume_end_time));
8349 
8350 #if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
8351 		bcm_bprintf(strbuf, "logtrace_thread_entry_time="SEC_USEC_FMT
8352 			" logtrace_thread_sem_down_time="SEC_USEC_FMT
8353 			"\nlogtrace_thread_flush_time="SEC_USEC_FMT
8354 			" logtrace_thread_unexpected_break_time="SEC_USEC_FMT
8355 			"\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
8356 			GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
8357 			GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
8358 			GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
8359 			GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
8360 			GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time));
8361 #endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
8362 }
8363 
8364 void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
8365 {
8366 	uint32 intstatus = 0;
8367 	uint32 intmask = 0;
8368 	uint32 d2h_db0 = 0;
8369 	uint32 d2h_mb_data = 0;
8370 
8371 	intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
8372 		dhd->bus->pcie_mailbox_int, 0, 0);
8373 	intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
8374 		dhd->bus->pcie_mailbox_mask, 0, 0);
8375 	d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
8376 	dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
8377 
8378 	bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
8379 		intstatus, intmask, d2h_db0);
8380 	bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n",
8381 		d2h_mb_data, dhd->bus->def_intmask);
8382 }
8383 /** Add bus dump output to a buffer */
8384 void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
8385 {
8386 	uint16 flowid;
8387 	int ix = 0;
8388 	flow_ring_node_t *flow_ring_node;
8389 	flow_info_t *flow_info;
8390 #ifdef TX_STATUS_LATENCY_STATS
8391 	uint8 ifindex;
8392 	if_flow_lkup_t *if_flow_lkup;
8393 	dhd_if_tx_status_latency_t if_tx_status_latency[DHD_MAX_IFS];
8394 #endif /* TX_STATUS_LATENCY_STATS */
8395 
8396 	if (dhdp->busstate != DHD_BUS_DATA)
8397 		return;
8398 
8399 #ifdef TX_STATUS_LATENCY_STATS
8400 	memset(if_tx_status_latency, 0, sizeof(if_tx_status_latency));
8401 #endif /* TX_STATUS_LATENCY_STATS */
8402 #ifdef DHD_WAKE_STATUS
8403 	bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n",
8404 		bcmpcie_get_total_wake(dhdp->bus), dhdp->bus->wake_counts.rxwake,
8405 		dhdp->bus->wake_counts.rcwake);
8406 #ifdef DHD_WAKE_RX_STATUS
8407 	bcm_bprintf(strbuf, " unicast %u muticast %u broadcast %u arp %u\n",
8408 		dhdp->bus->wake_counts.rx_ucast, dhdp->bus->wake_counts.rx_mcast,
8409 		dhdp->bus->wake_counts.rx_bcast, dhdp->bus->wake_counts.rx_arp);
8410 	bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n",
8411 		dhdp->bus->wake_counts.rx_multi_ipv4, dhdp->bus->wake_counts.rx_multi_ipv6,
8412 		dhdp->bus->wake_counts.rx_icmpv6, dhdp->bus->wake_counts.rx_multi_other);
8413 	bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n",
8414 		dhdp->bus->wake_counts.rx_icmpv6_ra, dhdp->bus->wake_counts.rx_icmpv6_na,
8415 		dhdp->bus->wake_counts.rx_icmpv6_ns);
8416 #endif /* DHD_WAKE_RX_STATUS */
8417 #ifdef DHD_WAKE_EVENT_STATUS
8418 	for (flowid = 0; flowid < WLC_E_LAST; flowid++)
8419 		if (dhdp->bus->wake_counts.rc_event[flowid] != 0)
8420 			bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(flowid),
8421 				dhdp->bus->wake_counts.rc_event[flowid]);
8422 	bcm_bprintf(strbuf, "\n");
8423 #endif /* DHD_WAKE_EVENT_STATUS */
8424 #endif /* DHD_WAKE_STATUS */
8425 
8426 	dhd_prot_print_info(dhdp, strbuf);
8427 	dhd_dump_intr_registers(dhdp, strbuf);
8428 	dhd_dump_intr_counters(dhdp, strbuf);
8429 	bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
8430 		dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr);
8431 	bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr));
8432 #ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
8433 	bcm_bprintf(strbuf, "multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
8434 		dhdp->multi_client_flow_rings, dhdp->max_multi_client_flow_rings);
8435 #endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
8436 	bcm_bprintf(strbuf,
8437 		"%4s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ",
8438 		"Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen",
8439 		" Overflows", "  RD", "  WR");
8440 
8441 #ifdef TX_STATUS_LATENCY_STATS
8442 	/* Average Tx status/completion latency in microseconds */
8443 	bcm_bprintf(strbuf, "%16s %16s ", "       NumTxPkts", "    AvgTxCmpL_Us");
8444 #endif /* TX_STATUS_LATENCY_STATS */
8445 
8446 	bcm_bprintf(strbuf, "\n");
8447 
8448 	for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
8449 		flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
8450 		if (!flow_ring_node->active)
8451 			continue;
8452 
8453 		flow_info = &flow_ring_node->flow_info;
8454 		bcm_bprintf(strbuf,
8455 			"%4d %4d %2d %4d "MACDBG" %4d %4d %6d %10u ", ix++,
8456 			flow_ring_node->flowid, flow_info->ifindex, flow_info->tid,
8457 			MAC2STRDBG(flow_info->da),
8458 			DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue),
8459 			DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)),
8460 			DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node->queue)),
8461 			DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue));
8462 		dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf,
8463 			"%4d %4d ");
8464 
8465 #ifdef TX_STATUS_LATENCY_STATS
8466 		bcm_bprintf(strbuf, "%16d %16d ",
8467 			flow_info->num_tx_pkts,
8468 			flow_info->num_tx_status ?
8469 			DIV_U64_BY_U64(flow_info->cum_tx_status_latency,
8470 			flow_info->num_tx_status) : 0);
8471 
8472 		ifindex = flow_info->ifindex;
8473 		ASSERT(ifindex < DHD_MAX_IFS);
8474 		if (ifindex < DHD_MAX_IFS) {
8475 			if_tx_status_latency[ifindex].num_tx_status += flow_info->num_tx_status;
8476 			if_tx_status_latency[ifindex].cum_tx_status_latency +=
8477 				flow_info->cum_tx_status_latency;
8478 		} else {
8479 			DHD_ERROR(("%s: Bad IF index: %d associated with flowid: %d\n",
8480 				__FUNCTION__, ifindex, flowid));
8481 		}
8482 #endif /* TX_STATUS_LATENCY_STATS */
8483 		bcm_bprintf(strbuf, "\n");
8484 	}
8485 
8486 #ifdef TX_STATUS_LATENCY_STATS
8487 	bcm_bprintf(strbuf, "\n%s  %16s  %16s\n", "If", "AvgTxCmpL_Us", "NumTxStatus");
8488 	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
8489 	for (ix = 0; ix < DHD_MAX_IFS; ix++) {
8490 		if (!if_flow_lkup[ix].status) {
8491 			continue;
8492 		}
8493 		bcm_bprintf(strbuf, "%2d  %16d  %16d\n",
8494 			ix,
8495 			if_tx_status_latency[ix].num_tx_status ?
8496 			DIV_U64_BY_U64(if_tx_status_latency[ix].cum_tx_status_latency,
8497 			if_tx_status_latency[ix].num_tx_status): 0,
8498 			if_tx_status_latency[ix].num_tx_status);
8499 	}
8500 #endif /* TX_STATUS_LATENCY_STATS */
8501 
8502 #ifdef DHD_HP2P
8503 	if (dhdp->hp2p_capable) {
8504 		bcm_bprintf(strbuf, "\n%s  %16s  %16s", "Flowid", "Tx_t0", "Tx_t1");
8505 
8506 		for (flowid = 0; flowid < MAX_HP2P_FLOWS; flowid++) {
8507 			hp2p_info_t *hp2p_info;
8508 			int bin;
8509 
8510 			hp2p_info = &dhdp->hp2p_info[flowid];
8511 			if (hp2p_info->num_timer_start == 0)
8512 				continue;
8513 
8514 			bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
8515 			bcm_bprintf(strbuf, "\n%s", "Bin");
8516 
8517 			for (bin = 0; bin < MAX_TX_HIST_BIN; bin++) {
8518 				bcm_bprintf(strbuf, "\n%2d %20d  %16d", bin,
8519 					hp2p_info->tx_t0[bin], hp2p_info->tx_t1[bin]);
8520 			}
8521 
8522 			bcm_bprintf(strbuf, "\n%s  %16s", "Flowid", "Rx_t0");
8523 			bcm_bprintf(strbuf, "\n%d", hp2p_info->flowid);
8524 			bcm_bprintf(strbuf, "\n%s", "Bin");
8525 
8526 			for (bin = 0; bin < MAX_RX_HIST_BIN; bin++) {
8527 				bcm_bprintf(strbuf, "\n%d %20d", bin,
8528 					hp2p_info->rx_t0[bin]);
8529 			}
8530 
8531 			bcm_bprintf(strbuf, "\n%s  %16s  %16s",
8532 				"Packet limit", "Timer limit", "Timer start");
8533 			bcm_bprintf(strbuf, "\n%d %24d %16d", hp2p_info->num_pkt_limit,
8534 				hp2p_info->num_timer_limit, hp2p_info->num_timer_start);
8535 		}
8536 
8537 		bcm_bprintf(strbuf, "\n");
8538 	}
8539 #endif /* DHD_HP2P */
8540 
8541 	bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt);
8542 	bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt);
8543 	bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt);
8544 	if (dhdp->d2h_hostrdy_supported) {
8545 		bcm_bprintf(strbuf, "hostready count:%d\n", dhdp->bus->hostready_count);
8546 	}
8547 	bcm_bprintf(strbuf, "d2h_intr_method -> %s\n",
8548 		dhdp->bus->d2h_intr_method ? "PCIE_MSI" : "PCIE_INTX");
8549 }
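/*
 * All of the dump routines above print into a caller-provided bcmstrbuf.
 * A minimal caller sketch, assuming the bus is up (DHD_BUS_DATA) and using
 * the bcmutils string-buffer API (bcm_binit() wraps a flat char buffer):
 *
 *	struct bcmstrbuf strbuf;
 *	char *buf = MALLOCZ(dhdp->osh, 4096);
 *	if (buf != NULL) {
 *		bcm_binit(&strbuf, buf, 4096);
 *		dhd_bus_dump(dhdp, &strbuf);
 *		DHD_ERROR(("%s\n", buf));
 *		MFREE(dhdp->osh, buf, 4096);
 *	}
 */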
8550 
8551 #ifdef DNGL_AXI_ERROR_LOGGING
8552 bool
8553 dhd_axi_sig_match(dhd_pub_t *dhdp)
8554 {
8555 	uint32 axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, dhdp->axierror_logbuf_addr);
8556 
8557 	if (dhdp->dhd_induce_error == DHD_INDUCE_DROP_AXI_SIG) {
8558 		DHD_ERROR(("%s: Induce AXI signature drop\n", __FUNCTION__));
8559 		return FALSE;
8560 	}
8561 
8562 	DHD_ERROR(("%s: axi_tcm_addr: 0x%x, tcm range: 0x%x ~ 0x%x\n",
8563 		__FUNCTION__, axi_tcm_addr, dhdp->bus->dongle_ram_base,
8564 		dhdp->bus->dongle_ram_base + dhdp->bus->ramsize));
8565 	if (axi_tcm_addr >= dhdp->bus->dongle_ram_base &&
8566 	    axi_tcm_addr < dhdp->bus->dongle_ram_base + dhdp->bus->ramsize) {
8567 		uint32 axi_signature = dhdpcie_bus_rtcm32(dhdp->bus, (axi_tcm_addr +
8568 			OFFSETOF(hnd_ext_trap_axi_error_v1_t, signature)));
8569 		if (axi_signature == HND_EXT_TRAP_AXIERROR_SIGNATURE) {
8570 			return TRUE;
8571 		} else {
8572 			DHD_ERROR(("%s: No AXI signature: 0x%x\n",
8573 				__FUNCTION__, axi_signature));
8574 			return FALSE;
8575 		}
8576 	} else {
8577 		DHD_ERROR(("%s: No AXI shared tcm address debug info.\n", __FUNCTION__));
8578 		return FALSE;
8579 	}
8580 }
8581 
8582 void
8583 dhd_axi_error(dhd_pub_t *dhdp)
8584 {
8585 	dhd_axi_error_dump_t *axi_err_dump;
8586 	uint8 *axi_err_buf = NULL;
8587 	uint8 *p_axi_err = NULL;
8588 	uint32 axi_logbuf_addr;
8589 	uint32 axi_tcm_addr;
8590 	int err, size;
8591 
8592 	OSL_DELAY(75000);	/* give the dongle time to populate the AXI error log */
8593 
8594 	axi_logbuf_addr = dhdp->axierror_logbuf_addr;
8595 	if (!axi_logbuf_addr) {
8596 		DHD_ERROR(("%s: No AXI TCM address debug info.\n", __FUNCTION__));
8597 		goto sched_axi;
8598 	}
8599 
8600 	axi_err_dump = dhdp->axi_err_dump;
8601 	if (!axi_err_dump) {
8602 		goto sched_axi;
8603 	}
8604 
8605 	if (!dhd_axi_sig_match(dhdp)) {
8606 		goto sched_axi;
8607 	}
8608 
8609 	/* Reading AXI error data for SMMU fault */
8610 	DHD_ERROR(("%s: Read AXI data from TCM address\n", __FUNCTION__));
8611 	axi_tcm_addr = dhdpcie_bus_rtcm32(dhdp->bus, axi_logbuf_addr);
8612 	size = sizeof(hnd_ext_trap_axi_error_v1_t);
8613 	axi_err_buf = MALLOCZ(dhdp->osh, size);
8614 	if (axi_err_buf == NULL) {
8615 		DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
8616 		goto sched_axi;
8617 	}
8618 
8619 	p_axi_err = axi_err_buf;
8620 	err = dhdpcie_bus_membytes(dhdp->bus, FALSE, axi_tcm_addr, p_axi_err, size);
8621 	if (err) {
8622 		DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
8623 			__FUNCTION__, err, size, axi_tcm_addr));
8624 		goto sched_axi;
8625 	}
8626 
8627 	/* Dump data to Dmesg */
8628 	dhd_log_dump_axi_error(axi_err_buf);
8629 	err = memcpy_s(&axi_err_dump->etd_axi_error_v1, size, axi_err_buf, size);
8630 	if (err) {
8631 		DHD_ERROR(("%s: failed to copy etd axi error info, err=%d\n",
8632 			__FUNCTION__, err));
8633 	}
8634 
8635 sched_axi:
8636 	if (axi_err_buf) {
8637 		MFREE(dhdp->osh, axi_err_buf, size);
8638 	}
8639 	dhd_schedule_axi_error_dump(dhdp, NULL);
8640 }
8641 
8642 static void
8643 dhd_log_dump_axi_error(uint8 *axi_err)
8644 {
8645 	dma_dentry_v1_t dma_dentry;
8646 	dma_fifo_v1_t dma_fifo;
8647 	int i = 0, j = 0;
8648 
8649 	if (*(uint8 *)axi_err == HND_EXT_TRAP_AXIERROR_VERSION_1) {
8650 		hnd_ext_trap_axi_error_v1_t *axi_err_v1 = (hnd_ext_trap_axi_error_v1_t *)axi_err;
8651 		DHD_ERROR(("%s: signature : 0x%x\n", __FUNCTION__, axi_err_v1->signature));
8652 		DHD_ERROR(("%s: version : 0x%x\n", __FUNCTION__, axi_err_v1->version));
8653 		DHD_ERROR(("%s: length : 0x%x\n", __FUNCTION__, axi_err_v1->length));
8654 		DHD_ERROR(("%s: dma_fifo_valid_count : 0x%x\n",
8655 			__FUNCTION__, axi_err_v1->dma_fifo_valid_count));
8656 		DHD_ERROR(("%s: axi_errorlog_status : 0x%x\n",
8657 			__FUNCTION__, axi_err_v1->axi_errorlog_status));
8658 		DHD_ERROR(("%s: axi_errorlog_core : 0x%x\n",
8659 			__FUNCTION__, axi_err_v1->axi_errorlog_core));
8660 		DHD_ERROR(("%s: axi_errorlog_hi : 0x%x\n",
8661 			__FUNCTION__, axi_err_v1->axi_errorlog_hi));
8662 		DHD_ERROR(("%s: axi_errorlog_lo : 0x%x\n",
8663 			__FUNCTION__, axi_err_v1->axi_errorlog_lo));
8664 		DHD_ERROR(("%s: axi_errorlog_id : 0x%x\n",
8665 			__FUNCTION__, axi_err_v1->axi_errorlog_id));
8666 
8667 		for (i = 0; i < MAX_DMAFIFO_ENTRIES_V1; i++) {
8668 			dma_fifo = axi_err_v1->dma_fifo[i];
8669 			DHD_ERROR(("%s: valid:%d : 0x%x\n", __FUNCTION__, i, dma_fifo.valid));
8670 			DHD_ERROR(("%s: direction:%d : 0x%x\n",
8671 				__FUNCTION__, i, dma_fifo.direction));
8672 			DHD_ERROR(("%s: index:%d : 0x%x\n",
8673 				__FUNCTION__, i, dma_fifo.index));
8674 			DHD_ERROR(("%s: dpa:%d : 0x%x\n",
8675 				__FUNCTION__, i, dma_fifo.dpa));
8676 			DHD_ERROR(("%s: desc_lo:%d : 0x%x\n",
8677 				__FUNCTION__, i, dma_fifo.desc_lo));
8678 			DHD_ERROR(("%s: desc_hi:%d : 0x%x\n",
8679 				__FUNCTION__, i, dma_fifo.desc_hi));
8680 			DHD_ERROR(("%s: din:%d : 0x%x\n",
8681 				__FUNCTION__, i, dma_fifo.din));
8682 			DHD_ERROR(("%s: dout:%d : 0x%x\n",
8683 				__FUNCTION__, i, dma_fifo.dout));
8684 			for (j = 0; j < MAX_DMAFIFO_DESC_ENTRIES_V1; j++) {
8685 				dma_dentry = axi_err_v1->dma_fifo[i].dentry[j];
8686 				DHD_ERROR(("%s: ctrl1:%d : 0x%x\n",
8687 					__FUNCTION__, i, dma_dentry.ctrl1));
8688 				DHD_ERROR(("%s: ctrl2:%d : 0x%x\n",
8689 					__FUNCTION__, i, dma_dentry.ctrl2));
8690 				DHD_ERROR(("%s: addrlo:%d : 0x%x\n",
8691 					__FUNCTION__, i, dma_dentry.addrlo));
8692 				DHD_ERROR(("%s: addrhi:%d : 0x%x\n",
8693 					__FUNCTION__, i, dma_dentry.addrhi));
8694 			}
8695 		}
8696 	}
8697 	else {
8698 		DHD_ERROR(("%s: Invalid AXI version: 0x%x\n", __FUNCTION__, (*(uint8 *)axi_err)));
8699 	}
8700 }
8701 #endif /* DNGL_AXI_ERROR_LOGGING */
8702 
8703 /**
8704  * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
8705  * flow queue to their flow ring.
8706  */
8707 static void
8708 dhd_update_txflowrings(dhd_pub_t *dhd)
8709 {
8710 	unsigned long flags;
8711 	dll_t *item, *next;
8712 	flow_ring_node_t *flow_ring_node;
8713 	struct dhd_bus *bus = dhd->bus;
8714 
8715 	if (dhd_query_bus_erros(dhd)) {
8716 		return;
8717 	}
8718 
8719 	/* Hold flowring_list_lock to ensure no race condition while accessing the List */
8720 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
8721 	for (item = dll_head_p(&bus->flowring_active_list);
8722 		(!dhd_is_device_removed(dhd) && !dll_end(&bus->flowring_active_list, item));
8723 		item = next) {
8724 		if (dhd->hang_was_sent) {
8725 			break;
8726 		}
8727 
8728 		next = dll_next_p(item);
8729 		flow_ring_node = dhd_constlist_to_flowring(item);
8730 
8731 		/* Ensure that flow_ring_node in the list is not NULL */
8732 		ASSERT(flow_ring_node != NULL);
8733 
8734 		/* Ensure that the flowring node has valid contents */
8735 		ASSERT(flow_ring_node->prot_info != NULL);
8736 
8737 		dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
8738 	}
8739 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
8740 }
8741 
8742 /** Mailbox ringbell Function */
8743 static void
8744 dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
8745 {
8746 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
8747 		(bus->sih->buscorerev == 4)) {
8748 		DHD_ERROR(("mailbox communication not supported\n"));
8749 		return;
8750 	}
8751 	if (bus->db1_for_mb)  {
8752 		/* this is a pcie core register, not the config register */
8753 		DHD_INFO(("writing a mail box interrupt to the device, through doorbell 1\n"));
8754 		if (DAR_PWRREQ(bus)) {
8755 			dhd_bus_pcie_pwr_req(bus);
8756 		}
8757 		si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db1_addr_get(bus),
8758 			~0, 0x12345678);
8759 	} else {
8760 		DHD_INFO(("writing a mail box interrupt to the device, through config space\n"));
8761 		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
8762 		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
8763 	}
8764 }
8765 
8766 /* Upon receiving a mailbox interrupt,
8767  * if H2D_FW_TRAP bit is set in mailbox location
8768  * device traps
8769  */
8770 static void
8771 dhdpcie_fw_trap(dhd_bus_t *bus)
8772 {
8773 	/* Send the mailbox data and generate mailbox intr. */
8774 	dhdpcie_send_mb_data(bus, H2D_FW_TRAP);
8775 	/* For FWs that cannot interpret H2D_FW_TRAP */
8776 	(void)dhd_wl_ioctl_set_intiovar(bus->dhd, "bus:disconnect", 99, WLC_SET_VAR, TRUE, 0);
8777 }
8778 
8779 /** mailbox doorbell ring function */
8780 void
8781 dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
8782 {
8783 	/* Skip after sending D3_INFORM */
8784 	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
8785 		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
8786 			__FUNCTION__, bus->bus_low_power_state));
8787 		return;
8788 	}
8789 
8790 	/* Skip in the case of link down */
8791 	if (bus->is_linkdown) {
8792 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
8793 		return;
8794 	}
8795 
8796 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
8797 		(bus->sih->buscorerev == 4)) {
8798 		si_corereg(bus->sih, bus->sih->buscoreidx, bus->pcie_mailbox_int,
8799 			PCIE_INTB, PCIE_INTB);
8800 	} else {
8801 		/* this is a pcie core register, not the config register */
8802 		DHD_INFO(("writing a door bell to the device\n"));
8803 		if (IDMA_ACTIVE(bus->dhd)) {
8804 			if (DAR_PWRREQ(bus)) {
8805 				dhd_bus_pcie_pwr_req(bus);
8806 			}
8807 			si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
8808 				~0, value);
8809 		} else {
8810 			if (DAR_PWRREQ(bus)) {
8811 				dhd_bus_pcie_pwr_req(bus);
8812 			}
8813 			si_corereg(bus->sih, bus->sih->buscoreidx,
8814 				dhd_bus_db0_addr_get(bus), ~0, 0x12345678);
8815 		}
8816 	}
8817 }
8818 
8819 /** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */
8820 void
8821 dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake)
8822 {
8823 	/* this is a pcie core register, not the config register */
8824 	/* Skip after sending D3_INFORM */
8825 	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
8826 		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
8827 			__FUNCTION__, bus->bus_low_power_state));
8828 		return;
8829 	}
8830 
8831 	/* Skip in the case of link down */
8832 	if (bus->is_linkdown) {
8833 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
8834 		return;
8835 	}
8836 
8837 	DHD_INFO(("writing a door bell 2 to the device\n"));
8838 	if (DAR_PWRREQ(bus)) {
8839 		dhd_bus_pcie_pwr_req(bus);
8840 	}
8841 	si_corereg(bus->sih, bus->sih->buscoreidx, dhd_bus_db0_addr_2_get(bus),
8842 		~0, value);
8843 }
8844 
8845 void
8846 dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
8847 {
8848 	/* Skip after sending D3_INFORM */
8849 	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
8850 		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
8851 			__FUNCTION__, bus->bus_low_power_state));
8852 		return;
8853 	}
8854 
8855 	/* Skip in the case of link down */
8856 	if (bus->is_linkdown) {
8857 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
8858 		return;
8859 	}
8860 
8861 	if (DAR_PWRREQ(bus)) {
8862 		dhd_bus_pcie_pwr_req(bus);
8863 	}
8864 
8865 #ifdef DHD_DB0TS
8866 	if (bus->dhd->db0ts_capable) {
8867 		uint64 ts;
8868 
8869 		ts = local_clock();
8870 		do_div(ts, 1000);
8871 
8872 		value = htol32(ts & 0xFFFFFFFF);
8873 		DHD_INFO(("%s: usec timer = 0x%x\n", __FUNCTION__, value));
8874 	}
8875 #endif /* DHD_DB0TS */
8876 	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
8877 }
8878 
8879 void
8880 dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake)
8881 {
8882 	/* Skip after sending D3_INFORM */
8883 	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
8884 		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
8885 			__FUNCTION__, bus->bus_low_power_state));
8886 		return;
8887 	}
8888 
8889 	/* Skip in the case of link down */
8890 	if (bus->is_linkdown) {
8891 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
8892 		return;
8893 	}
8894 
8895 	if (DAR_PWRREQ(bus)) {
8896 		dhd_bus_pcie_pwr_req(bus);
8897 	}
8898 	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_2_addr, value);
8899 }
8900 
8901 static void
8902 dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
8903 {
8904 	uint32 w;
8905 	/* Skip after sending D3_INFORM */
8906 	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
8907 		DHD_ERROR(("%s: trying to ring the doorbell after D3 inform %d\n",
8908 			__FUNCTION__, bus->bus_low_power_state));
8909 		return;
8910 	}
8911 
8912 	/* Skip in the case of link down */
8913 	if (bus->is_linkdown) {
8914 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
8915 		return;
8916 	}
8917 
8918 	w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
8919 	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
8920 }
8921 
8922 dhd_mb_ring_t
8923 dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
8924 {
8925 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
8926 		(bus->sih->buscorerev == 4)) {
8927 		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
8928 			bus->pcie_mailbox_int);
8929 		if (bus->pcie_mb_intr_addr) {
8930 			bus->pcie_mb_intr_osh = si_osh(bus->sih);
8931 			return dhd_bus_ringbell_oldpcie;
8932 		}
8933 	} else {
8934 		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
8935 			dhd_bus_db0_addr_get(bus));
8936 		if (bus->pcie_mb_intr_addr) {
8937 			bus->pcie_mb_intr_osh = si_osh(bus->sih);
8938 			return dhdpcie_bus_ringbell_fast;
8939 		}
8940 	}
8941 	return dhd_bus_ringbell;
8942 }
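/*
 * The selector above hides the per-chip doorbell mechanics behind a single
 * dhd_mb_ring_t function pointer, so ring-submission code never has to
 * branch on buscorerev. A caller sketch (the 'ring_fn' local is
 * illustrative):
 *
 *	dhd_mb_ring_t ring_fn = dhd_bus_get_mbintr_fn(bus);
 *	ring_fn(bus, 0x12345678);	// same magic value the slow path writes
 */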
8943 
8944 dhd_mb_ring_2_t
8945 dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus)
8946 {
8947 	bus->pcie_mb_intr_2_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
8948 		dhd_bus_db0_addr_2_get(bus));
8949 	if (bus->pcie_mb_intr_2_addr) {
8950 		bus->pcie_mb_intr_osh = si_osh(bus->sih);
8951 		return dhdpcie_bus_ringbell_2_fast;
8952 	}
8953 	return dhd_bus_ringbell_2;
8954 }
8955 
8956 bool BCMFASTPATH
8957 dhd_bus_dpc(struct dhd_bus *bus)
8958 {
8959 	bool resched = FALSE;	  /* Flag indicating resched wanted */
8960 	unsigned long flags;
8961 
8962 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
8963 
8964 	bus->dpc_entry_time = OSL_LOCALTIME_NS();
8965 
8966 	DHD_GENERAL_LOCK(bus->dhd, flags);
8967 	/* Check only for DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS:
8968 	 * if an ioctl is waiting for a response while rmmod runs in parallel,
8969 	 * the state becomes DHD_BUS_DOWN_IN_PROGRESS, and returning here would
8970 	 * leave that ioctl response unhandled ("IOCTL Resumed On timeout")
8971 	 */
8972 	if (bus->dhd->busstate == DHD_BUS_DOWN) {
8973 		DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
8974 		bus->intstatus = 0;
8975 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
8976 		bus->dpc_return_busdown_count++;
8977 		return 0;
8978 	}
8979 #ifdef DHD_PCIE_RUNTIMEPM
8980 	bus->idlecount = 0;
8981 #endif /* DHD_PCIE_RUNTIMEPM */
8982 	DHD_BUS_BUSY_SET_IN_DPC(bus->dhd);
8983 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
8984 
8985 	resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus);
8986 	if (!resched) {
8987 		bus->intstatus = 0;
8988 		bus->dpc_intr_enable_count++;
8989 		/* For Linux, MacOS etc. (other than NDIS), re-enable the host
8990 		 * interrupts that were disabled in dhdpcie_bus_isr()
8991 		 */
8992 		dhdpcie_bus_intr_enable(bus);
8993 		dhdpcie_enable_irq(bus); /* Enable back interrupt!! */
8994 		bus->dpc_exit_time = OSL_LOCALTIME_NS();
8995 	} else {
8996 		bus->resched_dpc_time = OSL_LOCALTIME_NS();
8997 	}
8998 
8999 	bus->dpc_sched = resched;
9000 
9001 	DHD_GENERAL_LOCK(bus->dhd, flags);
9002 	DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd);
9003 	dhd_os_busbusy_wake(bus->dhd);
9004 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
9005 
9006 	return resched;
9007 
9008 }
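/*
 * Interrupt flow for reference: the ISR masks the dongle interrupt and
 * schedules this DPC; the DPC drains the rings and unmasks again only when
 * nothing is left (resched == FALSE), otherwise it stays scheduled. A
 * condensed sketch of the ISR side that pairs with the re-enable above
 * (roughly what dhdpcie_bus_isr() does):
 *
 *	bus->isr_intr_disable_count++;
 *	dhdpcie_bus_intr_disable(bus);	// mask via IntMask
 *	dhd_sched_dpc(bus->dhd);	// defer ring processing to the DPC
 */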
9009 
9010 int
9011 dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
9012 {
9013 	uint32 cur_h2d_mb_data = 0;
9014 
9015 	DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data));
9016 
9017 	if (bus->is_linkdown) {
9018 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
9019 		return BCME_ERROR;
9020 	}
9021 
9022 	if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !bus->use_mailbox) {
9023 		DHD_INFO(("API rev is 6, sending mb data as H2D Ctrl message to dongle, 0x%04x\n",
9024 			h2d_mb_data));
9025 		/* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. */
9026 		{
9027 			if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) {
9028 				DHD_ERROR(("failure sending the H2D Mailbox message "
9029 					"to firmware\n"));
9030 				goto fail;
9031 			}
9032 		}
9033 		goto done;
9034 	}
9035 
9036 	dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
9037 
9038 	if (cur_h2d_mb_data != 0) {
9039 		uint32 i = 0;
9040 		DHD_INFO(("GRRRRRRR: MB transaction is already pending 0x%04x\n", cur_h2d_mb_data));
9041 		while ((i++ < 100) && cur_h2d_mb_data) {
9042 			OSL_DELAY(10);
9043 			dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
9044 		}
9045 		if (i >= 100) {
9046 			DHD_ERROR(("%s : waited 1ms for the dngl "
9047 				"to ack the previous mb transaction\n", __FUNCTION__));
9048 			DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n",
9049 				__FUNCTION__, cur_h2d_mb_data));
9050 		}
9051 	}
9052 
9053 	dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0);
9054 	dhd_bus_gen_devmb_intr(bus);
9055 
9056 done:
9057 	if (h2d_mb_data == H2D_HOST_D3_INFORM) {
9058 		DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
9059 		bus->last_d3_inform_time = OSL_LOCALTIME_NS();
9060 		bus->d3_inform_cnt++;
9061 	}
9062 	if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
9063 		DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__));
9064 		bus->d0_inform_in_use_cnt++;
9065 	}
9066 	if (h2d_mb_data == H2D_HOST_D0_INFORM) {
9067 		DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__));
9068 		bus->d0_inform_cnt++;
9069 	}
9070 	return BCME_OK;
9071 fail:
9072 	return BCME_ERROR;
9073 }
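/*
 * Typical callers hand this one of the H2D_HOST_* mailbox words; e.g. the
 * suspend path (a sketch; the real return handling lives elsewhere in this
 * file):
 *
 *	if (dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM) != BCME_OK) {
 *		DHD_ERROR(("%s: D3_INFORM not delivered\n", __FUNCTION__));
 *	}
 */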
9074 
9075 static void
9076 dhd_bus_handle_d3_ack(dhd_bus_t *bus)
9077 {
9078 	unsigned long flags_bus;
9079 	DHD_BUS_LOCK(bus->bus_lock, flags_bus);
9080 	bus->suspend_intr_disable_count++;
9081 	/* Disable dongle Interrupts Immediately after D3 */
9082 
9083 	/* For Linux, MacOS etc. (other than NDIS), along with disabling the
9084 	 * dongle interrupt by clearing the IntMask, disable the interrupt
9085 	 * directly on the host side as well. Also clear the intstatus if it
9086 	 * is set, to avoid unnecessary interrupts after D3 ACK.
9087 	 */
9088 	dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */
9089 	dhdpcie_bus_clear_intstatus(bus);
9090 	dhdpcie_disable_irq_nosync(bus); /* Disable host interrupt!! */
9091 
9092 	if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
9093 		/* Set bus_low_power_state to DHD_BUS_D3_ACK_RECIEVED */
9094 		bus->bus_low_power_state = DHD_BUS_D3_ACK_RECIEVED;
9095 		DHD_ERROR(("%s: D3_ACK Recieved\n", __FUNCTION__));
9096 	}
9097 	DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
9098 	/* Check for the D3 ACK induce flag, which is set by firing a dhd iovar to induce a D3 ACK
9099 	 * timeout. If the flag is set, the D3 wake is skipped, which results in a D3 ACK timeout.
9100 	 */
9101 	if (bus->dhd->dhd_induce_error != DHD_INDUCE_D3_ACK_TIMEOUT) {
9102 		bus->wait_for_d3_ack = 1;
9103 		dhd_os_d3ack_wake(bus->dhd);
9104 	} else {
9105 		DHD_ERROR(("%s: Inducing D3 ACK timeout\n", __FUNCTION__));
9106 	}
9107 }
9108 void
9109 dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data)
9110 {
9111 	if (MULTIBP_ENAB(bus->sih)) {
9112 		dhd_bus_pcie_pwr_req(bus);
9113 	}
9114 
9115 	DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
9116 
9117 	if (d2h_mb_data & D2H_DEV_FWHALT) {
9118 		DHD_ERROR(("FW trap has happened\n"));
9119 		dhdpcie_checkdied(bus, NULL, 0);
9120 #ifdef OEM_ANDROID
9121 #ifdef SUPPORT_LINKDOWN_RECOVERY
9122 #ifdef CONFIG_ARCH_MSM
9123 		bus->no_cfg_restore = 1;
9124 #endif /* CONFIG_ARCH_MSM */
9125 #endif /* SUPPORT_LINKDOWN_RECOVERY */
9126 		dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO);
9127 #endif /* OEM_ANDROID */
9128 		goto exit;
9129 	}
9130 	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ)  {
9131 		bool ds_acked = FALSE;
9132 		BCM_REFERENCE(ds_acked);
9133 		if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
9134 			DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITING\n"));
9135 			DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
9136 			bus->dhd->busstate = DHD_BUS_DOWN;
9137 			goto exit;
9138 		}
9139 		/* what should we do */
9140 		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
9141 		{
9142 			dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
9143 			DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
9144 		}
9145 	}
9146 	if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE)  {
9147 		/* what should we do */
9148 		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
9149 	}
9150 	if (d2h_mb_data & D2HMB_DS_HOST_SLEEP_EXIT_ACK)  {
9151 		/* what should we do */
9152 		DHD_INFO(("D2H_MB_DATA: D0 ACK\n"));
9153 	}
9154 	if (d2h_mb_data & D2H_DEV_D3_ACK)  {
9155 		/* what should we do */
9156 		DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
9157 		if (!bus->wait_for_d3_ack) {
9158 #if defined(DHD_HANG_SEND_UP_TEST)
9159 			if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
9160 				DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
9161 			} else {
9162 				dhd_bus_handle_d3_ack(bus);
9163 			}
9164 #else /* DHD_HANG_SEND_UP_TEST */
9165 			dhd_bus_handle_d3_ack(bus);
9166 #endif /* DHD_HANG_SEND_UP_TEST */
9167 		}
9168 	}
9169 
9170 exit:
9171 	if (MULTIBP_ENAB(bus->sih)) {
9172 		dhd_bus_pcie_pwr_req_clear(bus);
9173 	}
9174 }
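/*
 * D2H mailbox bits handled above, for quick reference:
 *	D2H_DEV_FWHALT               -> dongle trapped; collect dumps, flag hang
 *	D2H_DEV_DS_ENTER_REQ         -> ack deep sleep with H2D_HOST_DS_ACK
 *	D2H_DEV_DS_EXIT_NOTE         -> deep sleep exit, informational only
 *	D2HMB_DS_HOST_SLEEP_EXIT_ACK -> D0 ack, informational only
 *	D2H_DEV_D3_ACK               -> completes the host suspend handshake
 */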
9175 
9176 static void
9177 dhdpcie_handle_mb_data(dhd_bus_t *bus)
9178 {
9179 	uint32 d2h_mb_data = 0;
9180 	uint32 zero = 0;
9181 
9182 	if (MULTIBP_ENAB(bus->sih)) {
9183 		dhd_bus_pcie_pwr_req(bus);
9184 	}
9185 
9186 	dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
9187 	if (D2H_DEV_MB_INVALIDATED(d2h_mb_data)) {
9188 		DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n",
9189 			__FUNCTION__, d2h_mb_data));
9190 		goto exit;
9191 	}
9192 
9193 	dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
9194 
9195 	DHD_INFO_HW4(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
9196 	if (d2h_mb_data & D2H_DEV_FWHALT)  {
9197 		DHD_ERROR(("FW trap has happened\n"));
9198 		dhdpcie_checkdied(bus, NULL, 0);
9199 		/* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
9200 		goto exit;
9201 	}
9202 	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ)  {
9203 		/* what should we do */
9204 		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
9205 		dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
9206 		DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
9207 	}
9208 	if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE)  {
9209 		/* what should we do */
9210 		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
9211 	}
9212 	if (d2h_mb_data & D2H_DEV_D3_ACK)  {
9213 		/* what should we do */
9214 		DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
9215 		if (!bus->wait_for_d3_ack) {
9216 #if defined(DHD_HANG_SEND_UP_TEST)
9217 			if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
9218 				DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
9219 			} else {
9220 				dhd_bus_handle_d3_ack(bus);
9221 			}
9222 #else /* DHD_HANG_SEND_UP_TEST */
9223 			dhd_bus_handle_d3_ack(bus);
9224 #endif /* DHD_HANG_SEND_UP_TEST */
9225 		}
9226 	}
9227 
9228 exit:
9229 	if (MULTIBP_ENAB(bus->sih)) {
9230 		dhd_bus_pcie_pwr_req_clear(bus);
9231 	}
9232 }
9233 
9234 static void
9235 dhdpcie_read_handle_mb_data(dhd_bus_t *bus)
9236 {
9237 	uint32 d2h_mb_data = 0;
9238 	uint32 zero = 0;
9239 
9240 	if (bus->is_linkdown) {
9241 		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
9242 		return;
9243 	}
9244 
9245 	if (MULTIBP_ENAB(bus->sih)) {
9246 		dhd_bus_pcie_pwr_req(bus);
9247 	}
9248 
9249 	dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
9250 	if (!d2h_mb_data) {
9251 		goto exit;
9252 	}
9253 
9254 	dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
9255 
9256 	dhd_bus_handle_mb_data(bus, d2h_mb_data);
9257 
9258 exit:
9259 	if (MULTIBP_ENAB(bus->sih)) {
9260 		dhd_bus_pcie_pwr_req_clear(bus);
9261 	}
9262 }
9263 
9264 static bool
9265 dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
9266 {
9267 	bool resched = FALSE;
9268 	unsigned long flags_bus;
9269 
9270 	if (MULTIBP_ENAB(bus->sih)) {
9271 		dhd_bus_pcie_pwr_req(bus);
9272 	}
9273 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
9274 		(bus->sih->buscorerev == 4)) {
9275 		/* Msg stream interrupt */
9276 		if (intstatus & I_BIT1) {
9277 			resched = dhdpci_bus_read_frames(bus);
9278 		} else if (intstatus & I_BIT0) {
9279 			/* do nothing for Now */
9280 		}
9281 	} else {
9282 		if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
9283 			bus->api.handle_mb_data(bus);
9284 
9285 		/* Do not process any rings after receiving D3_ACK */
9286 		DHD_BUS_LOCK(bus->bus_lock, flags_bus);
9287 		if (bus->bus_low_power_state == DHD_BUS_D3_ACK_RECIEVED) {
9288 			DHD_ERROR(("%s: D3 Ack Recieved. "
9289 				"Skip processing rest of ring buffers.\n", __FUNCTION__));
9290 			DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
9291 			goto exit;
9292 		}
9293 		DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
9294 
9295 		/* Validate intstatus only for INTX case */
9296 		if ((bus->d2h_intr_method == PCIE_MSI) ||
9297 			((bus->d2h_intr_method == PCIE_INTX) && (intstatus & bus->d2h_mb_mask))) {
9298 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
9299 			if (pm_runtime_get(dhd_bus_to_dev(bus)) >= 0) {
9300 				resched = dhdpci_bus_read_frames(bus);
9301 				pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
9302 				pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
9303 			}
9304 #else
9305 			resched = dhdpci_bus_read_frames(bus);
9306 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
9307 		}
9308 	}
9309 
9310 exit:
9311 	if (MULTIBP_ENAB(bus->sih)) {
9312 		dhd_bus_pcie_pwr_req_clear(bus);
9313 	}
9314 	return resched;
9315 }
9316 
9317 #if defined(DHD_H2D_LOG_TIME_SYNC)
9318 static void
9319 dhdpci_bus_rte_log_time_sync_poll(dhd_bus_t *bus)
9320 {
9321 	unsigned long time_elapsed;
9322 
9323 	/* Poll for timeout value periodically */
9324 	if ((bus->dhd->busstate == DHD_BUS_DATA) &&
9325 		(bus->dhd->dhd_rte_time_sync_ms != 0) &&
9326 		(bus->bus_low_power_state == DHD_BUS_NO_LOW_POWER_STATE)) {
9327 		time_elapsed = OSL_SYSUPTIME_US() - bus->dhd_rte_time_sync_count;
9328 		/* Compare the elapsed time in milliseconds */
9329 		if ((time_elapsed / 1000) >= bus->dhd->dhd_rte_time_sync_ms) {
9330 			/*
9331 			 * It's fine if it has crossed the timeout value; no need to
9332 			 * adjust the elapsed time
9333 			 */
9334 			bus->dhd_rte_time_sync_count += time_elapsed;
9335 
9336 			/* Schedule deferred work. The work function will send the IOVAR. */
9337 			dhd_h2d_log_time_sync_deferred_wq_schedule(bus->dhd);
9338 		}
9339 	}
9340 }
9341 #endif /* DHD_H2D_LOG_TIME_SYNC */
9342 
9343 static bool
9344 dhdpci_bus_read_frames(dhd_bus_t *bus)
9345 {
9346 	bool more = FALSE;
9347 	unsigned long flags_bus;
9348 
9349 	/* First check if there is a FW trap */
9350 	if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) &&
9351 		(bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) {
9352 #ifdef DNGL_AXI_ERROR_LOGGING
9353 		if (bus->dhd->axi_error) {
9354 			DHD_ERROR(("AXI Error happened\n"));
9355 			return FALSE;
9356 		}
9357 #endif /* DNGL_AXI_ERROR_LOGGING */
9358 		dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT);
9359 		return FALSE;
9360 	}
9361 
9362 	/* There may be frames in both ctrl buf and data buf; check ctrl buf first */
9363 	DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
9364 
9365 	dhd_prot_process_ctrlbuf(bus->dhd);
9366 	bus->last_process_ctrlbuf_time = OSL_LOCALTIME_NS();
9367 	/* Unlock to give chance for resp to be handled */
9368 	DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
9369 
9370 	/* Do not process rest of ring buf once bus enters low power state (D3_INFORM/D3_ACK) */
9371 	DHD_BUS_LOCK(bus->bus_lock, flags_bus);
9372 	if (bus->bus_low_power_state != DHD_BUS_NO_LOW_POWER_STATE) {
9373 		DHD_ERROR(("%s: Bus is in power save state (%d). "
9374 			"Skip processing rest of ring buffers.\n",
9375 			__FUNCTION__, bus->bus_low_power_state));
9376 		DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
9377 		return FALSE;
9378 	}
9379 	DHD_BUS_UNLOCK(bus->bus_lock, flags_bus);
9380 
9381 	DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
9382 	/* update the flow ring cpls */
9383 	dhd_update_txflowrings(bus->dhd);
9384 	bus->last_process_flowring_time = OSL_LOCALTIME_NS();
9385 
9386 	/* With heavy TX traffic, we could get a lot of TxStatus completions,
9387 	 * so add a bound
9388 	 */
9389 #ifdef DHD_HP2P
9390 	more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_HP2P_RING);
9391 #endif /* DHD_HP2P */
9392 	more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound, DHD_REGULAR_RING);
9393 	bus->last_process_txcpl_time = OSL_LOCALTIME_NS();
9394 
9395 	/* With heavy RX traffic, this routine could potentially spend a long
9396 	 * time processing RX frames without an RX bound
9397 	 */
9398 #ifdef DHD_HP2P
9399 	more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_HP2P_RING);
9400 #endif /* DHD_HP2P */
9401 	more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound, DHD_REGULAR_RING);
9402 	bus->last_process_rxcpl_time = OSL_LOCALTIME_NS();
9403 
9404 	/* Process info ring completion messages */
9405 #ifdef EWP_EDL
9406 	if (!bus->dhd->dongle_edl_support)
9407 #endif // endif
9408 	{
9409 		more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);
9410 		bus->last_process_infocpl_time = OSL_LOCALTIME_NS();
9411 	}
9412 #ifdef EWP_EDL
9413 	else {
9414 		more |= dhd_prot_process_msgbuf_edl(bus->dhd);
9415 		bus->last_process_edl_time = OSL_LOCALTIME_NS();
9416 	}
9417 #endif /* EWP_EDL */
9418 
9419 #ifdef IDLE_TX_FLOW_MGMT
9420 	if (bus->enable_idle_flowring_mgmt) {
9421 		/* Look for idle flow rings */
9422 		dhd_bus_check_idle_scan(bus);
9423 	}
9424 #endif /* IDLE_TX_FLOW_MGMT */
9425 
9426 	/* don't talk to the dongle if fw is about to be reloaded */
9427 	if (bus->dhd->hang_was_sent) {
9428 		more = FALSE;
9429 	}
9430 	DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
9431 
9432 #ifdef SUPPORT_LINKDOWN_RECOVERY
9433 	if (bus->read_shm_fail) {
9434 		/* Read interrupt state once again to confirm linkdown */
9435 		int intstatus = si_corereg(bus->sih, bus->sih->buscoreidx,
9436 			bus->pcie_mailbox_int, 0, 0);
9437 		if (intstatus != (uint32)-1) {
9438 			DHD_ERROR(("%s: read SHM failed but intstatus is valid\n", __FUNCTION__));
9439 #ifdef DHD_FW_COREDUMP
9440 			if (bus->dhd->memdump_enabled) {
9441 				DHD_OS_WAKE_LOCK(bus->dhd);
9442 				bus->dhd->memdump_type = DUMP_TYPE_READ_SHM_FAIL;
9443 				dhd_bus_mem_dump(bus->dhd);
9444 				DHD_OS_WAKE_UNLOCK(bus->dhd);
9445 			}
9446 #endif /* DHD_FW_COREDUMP */
9447 		} else {
9448 			DHD_ERROR(("%s: Link is Down.\n", __FUNCTION__));
9449 #ifdef CONFIG_ARCH_MSM
9450 			bus->no_cfg_restore = 1;
9451 #endif /* CONFIG_ARCH_MSM */
9452 			bus->is_linkdown = 1;
9453 		}
9454 
9455 		dhd_prot_debug_info_print(bus->dhd);
9456 		bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN_EP_DETECT;
9457 		dhd_os_send_hang_message(bus->dhd);
9458 		more = FALSE;
9459 	}
9460 #endif /* SUPPORT_LINKDOWN_RECOVERY */
9461 #if defined(DHD_H2D_LOG_TIME_SYNC)
9462 	dhdpci_bus_rte_log_time_sync_poll(bus);
9463 #endif /* DHD_H2D_LOG_TIME_SYNC */
9464 	return more;
9465 }
9466 
9467 bool
9468 dhdpcie_tcm_valid(dhd_bus_t *bus)
9469 {
9470 	uint32 addr = 0;
9471 	int rv;
9472 	uint32 shaddr = 0;
9473 	pciedev_shared_t sh;
9474 
9475 	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
9476 
9477 	/* Read last word in memory to determine address of pciedev_shared structure */
9478 	addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
9479 
9480 	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
9481 		(addr > shaddr)) {
9482 		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid addr\n",
9483 			__FUNCTION__, addr));
9484 		return FALSE;
9485 	}
9486 
9487 	/* Read hndrte_shared structure */
9488 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh,
9489 		sizeof(pciedev_shared_t))) < 0) {
9490 		DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
9491 		return FALSE;
9492 	}
9493 
9494 	/* Compare any field in pciedev_shared_t */
9495 	if (sh.console_addr != bus->pcie_sh->console_addr) {
9496 		DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n"));
9497 		return FALSE;
9498 	}
9499 
9500 	return TRUE;
9501 }
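/*
 * Layout relied on above and in dhdpcie_readshared() below: the dongle
 * publishes the TCM address of its pciedev_shared_t in the last 32-bit word
 * of RAM, so a freshly read pointer is sane only when
 *
 *	dongle_ram_base <= addr <= shaddr,
 *	where shaddr = dongle_ram_base + ramsize - 4 holds 'addr' itself
 *
 * and 'addr' differs from the nvram checksum word left there by download.
 */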
9502 
9503 static void
9504 dhdpcie_update_bus_api_revisions(uint32 firmware_api_version, uint32 host_api_version)
9505 {
9506 	snprintf(bus_api_revision, BUS_API_REV_STR_LEN, "\nBus API revisions:(FW rev%d)(DHD rev%d)",
9507 			firmware_api_version, host_api_version);
9508 	return;
9509 }
9510 
9511 static bool
9512 dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version)
9513 {
9514 	bool retcode = FALSE;
9515 
9516 	DHD_INFO(("firmware api revision %d, host api revision %d\n",
9517 		firmware_api_version, host_api_version));
9518 
9519 	switch (firmware_api_version) {
9520 	case PCIE_SHARED_VERSION_7:
9521 	case PCIE_SHARED_VERSION_6:
9522 	case PCIE_SHARED_VERSION_5:
9523 		retcode = TRUE;
9524 		break;
9525 	default:
9526 		if (firmware_api_version <= host_api_version)
9527 			retcode = TRUE;
9528 	}
9529 	return retcode;
9530 }
9531 
9532 static int
9533 dhdpcie_readshared_console(dhd_bus_t *bus)
9534 {
9535 	uint32 addr = 0;
9536 	uint32 shaddr = 0;
9537 	int rv;
9538 	pciedev_shared_t *sh = bus->pcie_sh;
9539 	dhd_timeout_t tmo;
9540 
9541 	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
9542 	/* start a timer for 5 seconds */
9543 	dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);
9544 
9545 	while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
9546 		/* Read last word in memory to determine address of pciedev_shared structure */
9547 		addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
9548 	}
9549 
9550 	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
9551 		(addr > shaddr)) {
9552 		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
9553 			__FUNCTION__, addr));
9554 		DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed));
9555 		return BCME_ERROR;
9556 	} else {
9557 		bus->shared_addr = (ulong)addr;
9558 		DHD_ERROR(("%s:PCIe shared addr (0x%08x) read took %u usec "
9559 			"before dongle is ready\n", __FUNCTION__, addr, tmo.elapsed));
9560 	}
9561 
9562 	/* Read hndrte_shared structure */
9563 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
9564 		sizeof(pciedev_shared_t))) < 0) {
9565 		DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
9566 		return rv;
9567 	}
9568 
9569 	/* Endianness */
9570 	sh->console_addr = ltoh32(sh->console_addr);
9571 	/* load bus console address */
9572 	bus->console_addr = sh->console_addr;
9573 
9574 	return BCME_OK;
9575 } /* dhdpcie_readshared_console */
9576 
9577 static int
9578 dhdpcie_readshared(dhd_bus_t *bus)
9579 {
9580 	uint32 addr = 0;
9581 	int rv, dma_indx_wr_buf, dma_indx_rd_buf;
9582 	uint32 shaddr = 0;
9583 	pciedev_shared_t *sh = bus->pcie_sh;
9584 	dhd_timeout_t tmo;
9585 	bool idma_en = FALSE;
9586 
9587 	if (MULTIBP_ENAB(bus->sih)) {
9588 		dhd_bus_pcie_pwr_req(bus);
9589 	}
9590 
9591 	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
9592 	/* start a timer for 5 seconds */
9593 	dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);
9594 
9595 	while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
9596 		/* Read last word in memory to determine address of pciedev_shared structure */
9597 		addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
9598 	}
9599 
9600 	if (addr == (uint32)-1) {
9601 		DHD_ERROR(("%s: PCIe link might be down\n", __FUNCTION__));
9602 #ifdef SUPPORT_LINKDOWN_RECOVERY
9603 #ifdef CONFIG_ARCH_MSM
9604 		bus->no_cfg_restore = 1;
9605 #endif /* CONFIG_ARCH_MSM */
9606 #endif /* SUPPORT_LINKDOWN_RECOVERY */
9607 		bus->is_linkdown = 1;
9608 		return BCME_ERROR;
9609 	}
9610 
9611 	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
9612 		(addr > shaddr)) {
9613 		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
9614 			__FUNCTION__, addr));
9615 		DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed));
9616 #ifdef DEBUG_DNGL_INIT_FAIL
9617 		if (addr != (uint32)-1) {	/* skip further PCIE reads if read this addr */
9618 #ifdef CUSTOMER_HW4_DEBUG
9619 			bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
9620 #endif /* CUSTOMER_HW4_DEBUG */
9621 			if (bus->dhd->memdump_enabled) {
9622 				bus->dhd->memdump_type = DUMP_TYPE_DONGLE_INIT_FAILURE;
9623 				dhdpcie_mem_dump(bus);
9624 			}
9625 		}
9626 #endif /* DEBUG_DNGL_INIT_FAIL */
9627 		return BCME_ERROR;
9628 	} else {
9629 		bus->shared_addr = (ulong)addr;
9630 		DHD_ERROR(("PCIe shared addr (0x%08x) read took %u usec "
9631 			"before dongle is ready\n", addr, tmo.elapsed));
9632 	}
9633 
9634 	/* Read hndrte_shared structure */
9635 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
9636 		sizeof(pciedev_shared_t))) < 0) {
9637 		DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
9638 		return rv;
9639 	}
9640 
9641 	/* Endianness */
9642 	sh->flags = ltoh32(sh->flags);
9643 	sh->trap_addr = ltoh32(sh->trap_addr);
9644 	sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
9645 	sh->assert_file_addr = ltoh32(sh->assert_file_addr);
9646 	sh->assert_line = ltoh32(sh->assert_line);
9647 	sh->console_addr = ltoh32(sh->console_addr);
9648 	sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
9649 	sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
9650 	sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
9651 	sh->flags2 = ltoh32(sh->flags2);
9652 
9653 	/* load bus console address */
9654 	bus->console_addr = sh->console_addr;
9655 
9656 	/* Read the dma rx offset */
9657 	bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
9658 	dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);
9659 
9660 	DHD_INFO(("DMA RX offset from shared Area %d\n", bus->dma_rxoffset));
9661 
9662 	bus->api.fw_rev = sh->flags & PCIE_SHARED_VERSION_MASK;
9663 	if (!(dhdpcie_check_firmware_compatible(bus->api.fw_rev, PCIE_SHARED_VERSION)))
9664 	{
9665 		DHD_ERROR(("%s: pcie_shared version %d in dhd "
9666 		           "is older than pciedev_shared version %d in dongle\n",
9667 		           __FUNCTION__, PCIE_SHARED_VERSION,
9668 		           bus->api.fw_rev));
9669 		return BCME_ERROR;
9670 	}
9671 	dhdpcie_update_bus_api_revisions(bus->api.fw_rev, PCIE_SHARED_VERSION);
9672 
9673 	bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ?
9674 		sizeof(uint16) : sizeof(uint32);
9675 	DHD_INFO(("%s: Dongle advertizes %d size indices\n",
9676 		__FUNCTION__, bus->rw_index_sz));
9677 
9678 #ifdef IDLE_TX_FLOW_MGMT
9679 	if (sh->flags & PCIE_SHARED_IDLE_FLOW_RING) {
9680 		DHD_ERROR(("%s: FW Supports IdleFlow ring managment!\n",
9681 			__FUNCTION__));
9682 		bus->enable_idle_flowring_mgmt = TRUE;
9683 	}
9684 #endif /* IDLE_TX_FLOW_MGMT */
9685 
9686 	if (IDMA_CAPABLE(bus)) {
9687 		/* IDMA is not used on buscorerev 23 */
9688 		if (bus->sih->buscorerev != 23) {
9689 			idma_en = TRUE;
9690 		}
9691 	}
9692 
9693 	/* TODO: This needs to be selected based on IPC instead of at compile time */
9694 	bus->dhd->hwa_enable = TRUE;
9695 
9696 	if (idma_en) {
9697 		bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE;
9698 		bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE;
9699 	}
9700 
9701 	bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;
9702 
9703 	bus->dhd->dar_enable = (sh->flags & PCIE_SHARED_DAR) ? TRUE : FALSE;
9704 
9705 	/* Does the FW support DMA'ing r/w indices */
9706 	if (sh->flags & PCIE_SHARED_DMA_INDEX) {
9707 		if (!bus->dhd->dma_ring_upd_overwrite) {
9708 			{
9709 				if (!IFRM_ENAB(bus->dhd)) {
9710 					bus->dhd->dma_h2d_ring_upd_support = TRUE;
9711 				}
9712 				bus->dhd->dma_d2h_ring_upd_support = TRUE;
9713 			}
9714 		}
9715 
9716 		if (bus->dhd->dma_d2h_ring_upd_support)
9717 			bus->dhd->d2h_sync_mode = 0;
9718 
9719 		DHD_INFO(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
9720 			__FUNCTION__,
9721 			(bus->dhd->dma_h2d_ring_upd_support ? 1 : 0),
9722 			(bus->dhd->dma_d2h_ring_upd_support ? 1 : 0)));
9723 	} else if (!(sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK)) {
9724 		DHD_ERROR(("%s FW has to support either dma indices or d2h sync\n",
9725 			__FUNCTION__));
9726 		return BCME_UNSUPPORTED;
9727 	} else {
9728 		bus->dhd->dma_h2d_ring_upd_support = FALSE;
9729 		bus->dhd->dma_d2h_ring_upd_support = FALSE;
9730 	}
9731 
9732 	/* Does the firmware support fast delete ring? */
9733 	if (sh->flags2 & PCIE_SHARED2_FAST_DELETE_RING) {
9734 		DHD_INFO(("%s: Firmware supports fast delete ring\n",
9735 			__FUNCTION__));
9736 		bus->dhd->fast_delete_ring_support = TRUE;
9737 	} else {
9738 		DHD_INFO(("%s: Firmware does not support fast delete ring\n",
9739 			__FUNCTION__));
9740 		bus->dhd->fast_delete_ring_support = FALSE;
9741 	}
9742 
9743 	/* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
9744 	{
9745 		ring_info_t  ring_info;
9746 
9747 		/* boundary check */
9748 		if (sh->rings_info_ptr > shaddr) {
9749 			DHD_ERROR(("%s: rings_info_ptr is invalid 0x%x, skip reading ring info\n",
9750 				__FUNCTION__, sh->rings_info_ptr));
9751 			return BCME_ERROR;
9752 		}
9753 
9754 		if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
9755 			(uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
9756 			return rv;
9757 
9758 		bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
9759 		bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);
9760 
9761 		if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
9762 			bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
9763 			bus->max_submission_rings = ltoh16(ring_info.max_submission_queues);
9764 			bus->max_completion_rings = ltoh16(ring_info.max_completion_rings);
9765 			bus->max_cmn_rings = bus->max_submission_rings - bus->max_tx_flowrings;
9766 			bus->api.handle_mb_data = dhdpcie_read_handle_mb_data;
9767 			bus->use_mailbox = sh->flags & PCIE_SHARED_USE_MAILBOX;
9768 		}
9769 		else {
9770 			bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
9771 			bus->max_submission_rings = bus->max_tx_flowrings;
9772 			bus->max_completion_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
9773 			bus->max_cmn_rings = BCMPCIE_H2D_COMMON_MSGRINGS;
9774 			bus->api.handle_mb_data = dhdpcie_handle_mb_data;
9775 			bus->use_mailbox = TRUE;
9776 		}
9777 		if (bus->max_completion_rings == 0) {
9778 			DHD_ERROR(("dongle completion rings are invalid %d\n",
9779 				bus->max_completion_rings));
9780 			return BCME_ERROR;
9781 		}
9782 		if (bus->max_submission_rings == 0) {
9783 			DHD_ERROR(("dongle submission rings are invalid %d\n",
9784 				bus->max_submission_rings));
9785 			return BCME_ERROR;
9786 		}
9787 		if (bus->max_tx_flowrings == 0) {
9788 			DHD_ERROR(("dongle txflow rings are invalid %d\n", bus->max_tx_flowrings));
9789 			return BCME_ERROR;
9790 		}
9791 
9792 		/* If both FW and Host support DMA'ing indices, allocate memory and notify FW
9793 		 * The max_sub_queues is read from FW initialized ring_info
9794 		 */
9795 		if (bus->dhd->dma_h2d_ring_upd_support || IDMA_ENAB(bus->dhd)) {
9796 			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
9797 				H2D_DMA_INDX_WR_BUF, bus->max_submission_rings);
9798 			dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
9799 				D2H_DMA_INDX_RD_BUF, bus->max_completion_rings);
9800 
9801 			if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
9802 				DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices. "
9803 						"Host will use w/r indices in TCM\n",
9804 						__FUNCTION__));
9805 				bus->dhd->dma_h2d_ring_upd_support = FALSE;
9806 				bus->dhd->idma_enable = FALSE;
9807 			}
9808 		}
9809 
9810 		if (bus->dhd->dma_d2h_ring_upd_support) {
9811 			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
9812 				D2H_DMA_INDX_WR_BUF, bus->max_completion_rings);
9813 			dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
9814 				H2D_DMA_INDX_RD_BUF, bus->max_submission_rings);
9815 
9816 			if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
9817 				DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices. "
9818 						"Host will use w/r indices in TCM\n",
9819 						__FUNCTION__));
9820 				bus->dhd->dma_d2h_ring_upd_support = FALSE;
9821 			}
9822 		}
9823 
9824 		if (IFRM_ENAB(bus->dhd)) {
9825 			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
9826 				H2D_IFRM_INDX_WR_BUF, bus->max_tx_flowrings);
9827 
9828 			if (dma_indx_wr_buf != BCME_OK) {
9829 				DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n",
9830 						__FUNCTION__));
9831 				bus->dhd->ifrm_enable = FALSE;
9832 			}
9833 		}
9834 
9835 		/* read ringmem and ringstate ptrs from shared area and store in host variables */
9836 		dhd_fillup_ring_sharedptr_info(bus, &ring_info);
9837 		if (dhd_msg_level & DHD_INFO_VAL) {
9838 			bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
9839 		}
9840 		DHD_INFO(("ring_info\n"));
9841 
9842 		DHD_ERROR(("%s: max H2D queues %d\n",
9843 			__FUNCTION__, ltoh16(ring_info.max_tx_flowrings)));
9844 
9845 		DHD_INFO(("mailbox address\n"));
9846 		DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n",
9847 			__FUNCTION__, bus->h2d_mb_data_ptr_addr));
9848 		DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n",
9849 			__FUNCTION__, bus->d2h_mb_data_ptr_addr));
9850 	}
9851 
9852 	DHD_INFO(("%s: d2h_sync_mode 0x%08x\n",
9853 		__FUNCTION__, bus->dhd->d2h_sync_mode));
9854 
9855 	bus->dhd->d2h_hostrdy_supported =
9856 		((sh->flags & PCIE_SHARED_HOSTRDY_SUPPORT) == PCIE_SHARED_HOSTRDY_SUPPORT);
9857 
9858 	bus->dhd->ext_trap_data_supported =
9859 		((sh->flags2 & PCIE_SHARED2_EXTENDED_TRAP_DATA) == PCIE_SHARED2_EXTENDED_TRAP_DATA);
9860 
9861 	if ((sh->flags2 & PCIE_SHARED2_TXSTATUS_METADATA) == 0)
9862 		bus->dhd->pcie_txs_metadata_enable = 0;
9863 
9864 	bus->dhd->hscb_enable =
9865 		(sh->flags2 & PCIE_SHARED2_HSCB) == PCIE_SHARED2_HSCB;
9866 
9867 #ifdef EWP_EDL
9868 	if (host_edl_support) {
9869 		bus->dhd->dongle_edl_support = (sh->flags2 & PCIE_SHARED2_EDL_RING) ? TRUE : FALSE;
9870 		DHD_ERROR(("Dongle EDL support: %u\n", bus->dhd->dongle_edl_support));
9871 	}
9872 #endif /* EWP_EDL */
9873 
9874 	bus->dhd->debug_buf_dest_support =
9875 		(sh->flags2 & PCIE_SHARED2_DEBUG_BUF_DEST) ? TRUE : FALSE;
9876 	DHD_ERROR(("FW supports debug buf dest? %s\n",
9877 		bus->dhd->debug_buf_dest_support ? "Y" : "N"));
9878 
9879 #ifdef DHD_HP2P
9880 	if (bus->dhd->hp2p_enable) {
9881 		bus->dhd->hp2p_ts_capable =
9882 			(sh->flags2 & PCIE_SHARED2_PKT_TIMESTAMP) == PCIE_SHARED2_PKT_TIMESTAMP;
9883 		bus->dhd->hp2p_capable =
9884 			(sh->flags2 & PCIE_SHARED2_HP2P) == PCIE_SHARED2_HP2P;
9885 		bus->dhd->hp2p_capable &= bus->dhd->hp2p_ts_capable;
9886 
9887 		DHD_ERROR(("FW supports HP2P? %s\n",
9888 			bus->dhd->hp2p_capable ? "Y" : "N"));
9889 
9890 		if (bus->dhd->hp2p_capable) {
9891 			bus->dhd->pkt_thresh = HP2P_PKT_THRESH;
9892 			bus->dhd->pkt_expiry = HP2P_PKT_EXPIRY;
9893 			bus->dhd->time_thresh = HP2P_TIME_THRESH;
9894 			for (addr = 0; addr < MAX_HP2P_FLOWS; addr++) {
9895 				hp2p_info_t *hp2p_info = &bus->dhd->hp2p_info[addr];
9896 
9897 				hp2p_info->hrtimer_init = FALSE;
9898 				hp2p_info->timer.function = &dhd_hp2p_write;
9899 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21))
9900 				tasklet_hrtimer_init(&hp2p_info->timer,
9901 					dhd_hp2p_write, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
9902 #else
9903 				hrtimer_init(&hp2p_info->timer, CLOCK_MONOTONIC,
9904 					HRTIMER_MODE_REL_SOFT);
9905 #endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(5, 1, 21) */
9906 			}
9907 		}
9908 	}
9909 #endif /* DHD_HP2P */
9910 
9911 #ifdef DHD_DB0TS
9912 	bus->dhd->db0ts_capable =
9913 		(sh->flags & PCIE_SHARED_TIMESTAMP_DB0) == PCIE_SHARED_TIMESTAMP_DB0;
9914 #endif /* DHD_DB0TS */
9915 
9916 	if (MULTIBP_ENAB(bus->sih)) {
9917 		dhd_bus_pcie_pwr_req_clear(bus);
9918 
9919 		/*
9920 		 * WAR to fix ARM cold boot;
9921 		 * De-assert WL domain in DAR
9922 		 */
9923 		if (bus->sih->buscorerev >= 68) {
9924 			dhd_bus_pcie_pwr_req_wl_domain(bus, FALSE);
9925 		}
9926 	}
9927 	return BCME_OK;
9928 } /* dhdpcie_readshared */
9929 
9930 /** Read ring mem and ring state ptr info from shared memory area in device memory */
9931 static void
9932 dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info)
9933 {
9934 	uint16 i = 0;
9935 	uint16 j = 0;
9936 	uint32 tcm_memloc;
9937 	uint32	d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;
9938 	uint16  max_tx_flowrings = bus->max_tx_flowrings;
9939 
9940 	/* Ring mem ptr info */
9941 	/* Allocated in the order
9942 		H2D_MSGRING_CONTROL_SUBMIT              0
9943 		H2D_MSGRING_RXPOST_SUBMIT               1
9944 		D2H_MSGRING_CONTROL_COMPLETE            2
9945 		D2H_MSGRING_TX_COMPLETE                 3
9946 		D2H_MSGRING_RX_COMPLETE                 4
9947 	*/
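
	/*
	 * The ring mem blocks are laid out back to back in TCM, so the address
	 * of any common ring's ring_mem_t can be derived directly (a worked
	 * form of the loop below):
	 *
	 *   ring_mem_addr(id) = ltoh32(ring_info->ringmem_ptr)
	 *                       + id * sizeof(ring_mem_t);
	 */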
9948 
9949 	{
9950 		/* ringmemptr holds start of the mem block address space */
9951 		tcm_memloc = ltoh32(ring_info->ringmem_ptr);
9952 
9953 		/* Find out ringmem ptr for each common ring */
9954 		for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) {
9955 			bus->ring_sh[i].ring_mem_addr = tcm_memloc;
9956 			/* Update mem block */
9957 			tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
9958 			DHD_INFO(("ring id %d ring mem addr 0x%04x \n",
9959 				i, bus->ring_sh[i].ring_mem_addr));
9960 		}
9961 	}
9962 
9963 	/* Ring state mem ptr info */
9964 	{
9965 		d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr);
9966 		d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
9967 		h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
9968 		h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);
9969 
9970 		/* Store h2d common ring write/read pointers */
9971 		for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
9972 			bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
9973 			bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
9974 
9975 			/* update mem block */
9976 			h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
9977 			h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
9978 
9979 			DHD_INFO(("h2d w/r : idx %d write %x read %x \n", i,
9980 				bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
9981 		}
9982 
9983 		/* Store d2h common ring write/read pointers */
9984 		for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
9985 			bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
9986 			bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
9987 
9988 			/* update mem block */
9989 			d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
9990 			d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
9991 
9992 			DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
9993 				bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
9994 		}
9995 
9996 		/* Store txflow ring write/read pointers */
9997 		if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
9998 			max_tx_flowrings -= BCMPCIE_H2D_COMMON_MSGRINGS;
9999 		} else {
10000 			/* Account for Debug info h2d ring located after the last tx flow ring */
10001 			max_tx_flowrings = max_tx_flowrings + 1;
10002 		}
10003 		for (j = 0; j < max_tx_flowrings; i++, j++)
10004 		{
10005 			bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
10006 			bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
10007 
10008 			/* update mem block */
10009 			h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
10010 			h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
10011 
10012 			DHD_INFO(("FLOW Rings h2d w/r : idx %d write %x read %x \n", i,
10013 				bus->ring_sh[i].ring_state_w,
10014 				bus->ring_sh[i].ring_state_r));
10015 		}
10016 		/* store wr/rd pointers for debug info completion ring */
10017 		bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
10018 		bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
10019 		d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
10020 		d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
10021 		DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
10022 			bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
10023 	}
10024 } /* dhd_fillup_ring_sharedptr_info */
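
/*
 * Resulting bus->ring_sh[] layout (a sketch for the PCIE_SHARED_VERSION_6+
 * case; the exact boundaries come from the ring counts in ring_info):
 *
 *   [0 .. BCMPCIE_H2D_COMMON_MSGRINGS-1]    h2d common rings
 *   [.. BCMPCIE_COMMON_MSGRING_MAX_ID]      d2h common rings
 *   [.. + max_tx_flowrings]                 h2d tx flow rings (+ debug info h2d ring)
 *   [last]                                  d2h debug info completion ring
 *
 * Each entry holds the TCM addresses of that ring's write and read indices.
 */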
10025 
10026 /**
10027  * Initialize bus module: prepare for communication with the dongle. Called after downloading
10028  * firmware into the dongle.
10029  */
10030 int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
10031 {
10032 	dhd_bus_t *bus = dhdp->bus;
10033 	int  ret = 0;
10034 
10035 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
10036 
10037 	ASSERT(bus->dhd);
10038 	if (!bus->dhd)
10039 		return 0;
10040 
10041 	if (PCIE_RELOAD_WAR_ENAB(bus->sih->buscorerev)) {
10042 		dhd_bus_pcie_pwr_req_clear_reload_war(bus);
10043 	}
10044 
10045 	if (MULTIBP_ENAB(bus->sih)) {
10046 		dhd_bus_pcie_pwr_req(bus);
10047 	}
10048 
10049 	/* Configure AER registers to log the TLP header */
10050 	dhd_bus_aer_config(bus);
10051 
10052 	/* Make sure we're talking to the core. */
10053 	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
10054 	ASSERT(bus->reg != NULL);
10055 
10056 	/* before opening up bus for data transfer, check if the shared area is intact */
10057 
10058 	/* Do minimum console buffer read */
10059 	/* This helps in getting trap messages if any */
10060 	if ((ret = dhdpcie_readshared_console(bus)) >= 0) {
10061 		if ((ret = dhdpcie_bus_readconsole(bus)) < 0) {
10062 			DHD_ERROR(("%s: Console buffer read failed\n",
10063 					__FUNCTION__));
10064 		}
10065 	}
10066 
10067 	ret = dhdpcie_readshared(bus);
10068 	if (ret < 0) {
10069 		DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
10070 		goto exit;
10071 	}
10072 
10073 	/* Make sure we're talking to the core. */
10074 	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
10075 	ASSERT(bus->reg != NULL);
10076 
10077 	dhd_init_bus_lock(bus);
10078 
10079 	dhd_init_backplane_access_lock(bus);
10080 
10081 	/* Set bus state according to enable result */
10082 	dhdp->busstate = DHD_BUS_DATA;
10083 	bus->bus_low_power_state = DHD_BUS_NO_LOW_POWER_STATE;
10084 	dhdp->dhd_bus_busy_state = 0;
10085 
10086 	/* D11 status via PCIe completion header */
10087 	if ((ret = dhdpcie_init_d11status(bus)) < 0) {
10088 		goto exit;
10089 	}
10090 
10091 	if (!dhd_download_fw_on_driverload)
10092 		dhd_dpc_enable(bus->dhd);
10093 	/* Enable the interrupt after device is up */
10094 	dhdpcie_bus_intr_enable(bus);
10095 
10096 	bus->intr_enabled = TRUE;
10097 
10098 	/* bcmsdh_intr_unmask(bus->sdh); */
10099 #ifdef DHD_PCIE_RUNTIMEPM
10100 	bus->idlecount = 0;
10101 	bus->idletime = (int32)MAX_IDLE_COUNT;
10102 	init_waitqueue_head(&bus->rpm_queue);
10103 	mutex_init(&bus->pm_lock);
10104 #else
10105 	bus->idletime = 0;
10106 #endif /* DHD_PCIE_RUNTIMEPM */
10107 
10108 	/* Make use_d0_inform TRUE for Rev 5 for backward compatibility */
10109 	if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
10110 		bus->use_d0_inform = TRUE;
10111 	} else {
10112 		bus->use_d0_inform = FALSE;
10113 	}
10114 
10115 exit:
10116 	if (MULTIBP_ENAB(bus->sih)) {
10117 		dhd_bus_pcie_pwr_req_clear(bus);
10118 	}
10119 	return ret;
10120 }
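
/*
 * Note the ordering above: the shared area is read and validated before
 * busstate is set to DHD_BUS_DATA, and the bus interrupt is enabled only
 * after that, presumably so the DPC never observes a half-initialized
 * shared area.
 */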
10121 
10122 static void
10123 dhdpcie_init_shared_addr(dhd_bus_t *bus)
10124 {
10125 	uint32 addr = 0;
10126 	uint32 val = 0;
10127 
10128 	addr = bus->dongle_ram_base + bus->ramsize - 4;
10129 #ifdef DHD_PCIE_RUNTIMEPM
10130 	dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0));
10131 #endif /* DHD_PCIE_RUNTIMEPM */
10132 	dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
10133 }
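
/*
 * The write above is one half of the shared-area handshake: the host clears
 * the last word of dongle RAM before firmware download, the firmware writes
 * the address of its pcie shared structure there once it boots, and
 * dhdpcie_readshared() polls that same word. Roughly (the dongle side is
 * illustrative, not code from this driver):
 *
 *   host:   val = 0; write(rambase + ramsize - 4, &val);     // this function
 *   dongle: write(rambase + ramsize - 4, &pciedev_shared);   // at FW init
 *   host:   poll rambase + ramsize - 4 until != 0            // dhdpcie_readshared()
 */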
10134 
10135 bool
10136 dhdpcie_chipmatch(uint16 vendor, uint16 device)
10137 {
10138 
10139 	if (vendor == PCI_VENDOR_ID_BROADCOM || vendor == PCI_VENDOR_ID_CYPRESS) {
10140 		DHD_ERROR(("%s: Supporting vendor %x device %x\n", __FUNCTION__,
10141 			vendor, device));
10142 	} else {
10143 		DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
10144 			vendor, device));
10145 		return (-ENODEV);
10146 	}
10147 
10148 	if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) ||
10149 		(device == BCM4350_D11AC5G_ID) || (device == BCM4350_CHIP_ID) ||
10150 		(device == BCM43569_CHIP_ID)) {
10151 		return 0;
10152 	}
10153 
10154 	if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) ||
10155 		(device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID)) {
10156 		return 0;
10157 	}
10158 
10159 	if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) ||
10160 		(device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID)) {
10161 		return 0;
10162 	}
10163 
10164 	if ((device == BCM4371_D11AC_ID) || (device == BCM4371_D11AC2G_ID) ||
10165 		(device == BCM4371_D11AC5G_ID) || (device == BCM4371_CHIP_ID)) {
10166 		return 0;
10167 	}
10168 
10169 	if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) ||
10170 		(device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device)) {
10171 		return 0;
10172 	}
10173 
10174 	if ((device == BCM43452_D11AC_ID) || (device == BCM43452_D11AC2G_ID) ||
10175 		(device == BCM43452_D11AC5G_ID)) {
10176 		return 0;
10177 	}
10178 
10179 	if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) ||
10180 		(device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID)) {
10181 		return 0;
10182 	}
10183 
10184 	if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) ||
10185 		(device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID)) {
10186 		return 0;
10187 	}
10188 
10189 	if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) ||
10190 		(device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID)) {
10191 		return 0;
10192 	}
10193 
10194 	if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) ||
10195 		(device == BCM4358_D11AC5G_ID)) {
10196 		return 0;
10197 	}
10198 
10199 	if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) ||
10200 		(device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID)) {
10201 		return 0;
10202 	}
10203 
10204 	if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) ||
10205 		(device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID)) {
10206 		return 0;
10207 	}
10208 
10209 	if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) ||
10210 		(device == BCM4359_D11AC5G_ID)) {
10211 		return 0;
10212 	}
10213 
10214 	if ((device == BCM43596_D11AC_ID) || (device == BCM43596_D11AC2G_ID) ||
10215 		(device == BCM43596_D11AC5G_ID)) {
10216 		return 0;
10217 	}
10218 
10219 	if ((device == BCM43597_D11AC_ID) || (device == BCM43597_D11AC2G_ID) ||
10220 		(device == BCM43597_D11AC5G_ID)) {
10221 		return 0;
10222 	}
10223 
10224 	if ((device == BCM4364_D11AC_ID) || (device == BCM4364_D11AC2G_ID) ||
10225 		(device == BCM4364_D11AC5G_ID) || (device == BCM4364_CHIP_ID)) {
10226 		return 0;
10227 	}
10228 
10229 	if ((device == BCM4361_D11AC_ID) || (device == BCM4361_D11AC2G_ID) ||
10230 		(device == BCM4361_D11AC5G_ID) || (device == BCM4361_CHIP_ID)) {
10231 		return 0;
10232 	}
10233 	if ((device == BCM4362_D11AX_ID) || (device == BCM4362_D11AX2G_ID) ||
10234 		(device == BCM4362_D11AX5G_ID) || (device == BCM4362_CHIP_ID)) {
10235 		return 0;
10236 	}
10237 	if ((device == BCM43751_D11AX_ID) || (device == BCM43751_D11AX2G_ID) ||
10238 		(device == BCM43751_D11AX5G_ID) || (device == BCM43751_CHIP_ID)) {
10239 		return 0;
10240 	}
10241 	if ((device == BCM4347_D11AC_ID) || (device == BCM4347_D11AC2G_ID) ||
10242 		(device == BCM4347_D11AC5G_ID) || (device == BCM4347_CHIP_ID)) {
10243 		return 0;
10244 	}
10245 
10246 	if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) ||
10247 		(device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID)) {
10248 		return 0;
10249 	}
10250 
10251 	if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) ||
10252 		(device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID) ||
10253 		(device == BCM43664_CHIP_ID) || (device == BCM43666_CHIP_ID)) {
10254 		return 0;
10255 	}
10256 
10257 	if ((device == BCM4369_D11AX_ID) || (device == BCM4369_D11AX2G_ID) ||
10258 		(device == BCM4369_D11AX5G_ID) || (device == BCM4369_CHIP_ID)) {
10259 		return 0;
10260 	}
10261 
10262 	if ((device == BCM4373_D11AC_ID) || (device == BCM4373_D11AC2G_ID) ||
10263 		(device == BCM4373_D11AC5G_ID) || (device == BCM4373_CHIP_ID)) {
10264 		return 0;
10265 	}
10266 
10267 	if ((device == BCM4375_D11AX_ID) || (device == BCM4375_D11AX2G_ID) ||
10268 		(device == BCM4375_D11AX5G_ID) || (device == BCM4375_CHIP_ID)) {
10269 		return 0;
10270 	}
10271 
10272 #ifdef CHIPS_CUSTOMER_HW6
10273 	if ((device == BCM4376_D11AC_ID) || (device == BCM4376_D11AC2G_ID) ||
10274 		(device == BCM4376_D11AC5G_ID) || (device == BCM4376_CHIP_ID)) {
10275 		return 0;
10276 	}
10277 	if ((device == BCM4377_M_D11AX_ID) || (device == BCM4377_D11AX_ID) ||
10278 		(device == BCM4377_D11AX2G_ID) || (device == BCM4377_D11AX5G_ID) ||
10279 		(device == BCM4377_CHIP_ID)) {
10280 		return 0;
10281 	}
10282 	if ((device == BCM4378_D11AC_ID) || (device == BCM4378_D11AC2G_ID) ||
10283 		(device == BCM4378_D11AC5G_ID) || (device == BCM4378_CHIP_ID)) {
10284 		return 0;
10285 	}
10286 #endif /* CHIPS_CUSTOMER_HW6 */
10287 #ifdef CHIPS_CUSTOMER_HW6
10288 	if ((device == BCM4368_D11AC_ID) || (device == BCM4368_D11AC2G_ID) ||
10289 		(device == BCM4368_D11AC5G_ID) || (device == BCM4368_CHIP_ID)) {
10290 		return 0;
10291 	}
10292 	if ((device == BCM4367_D11AC_ID) || (device == BCM4367_D11AC2G_ID) ||
10293 		(device == BCM4367_D11AC5G_ID) || (device == BCM4367_CHIP_ID)) {
10294 		return 0;
10295 	}
10296 #endif /* CHIPS_CUSTOMER_HW6 */
10297 
10298 	/* CYW55560 */
10299 	if (device == CYW55560_WLAN_ID) {
10300 		return 0;
10301 	}
10302 	DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device));
10303 	return (-ENODEV);
10304 } /* dhdpcie_chipmatch */
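
/*
 * The CIS data parsed by dhdpcie_sromotp_customvar() below is a simple TLV
 * stream: a one-byte tuple type, a one-byte length (absent for CISTPL_NULL
 * and CISTPL_END), then 'len' bytes of data. For the Broadcom vendor tuple
 * the first data byte subtypes it; values are little-endian. Illustrative
 * layout:
 *
 *   [CISTPL_BRCM_HNBU][len][HNBU_CUSTOM2][v0][v1][v2][v3]
 *
 *   customvar2 = (v3 << 24) | (v2 << 16) | (v1 << 8) | v0;
 */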
10305 
10306 /*
10307  * Name:  dhdpcie_sromotp_customvar
10308  * Description:
10309  * read otp/sprom and parse & store customvar.
10310  * A shadow of OTP/SPROM exists in ChipCommon Region
10311  * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
10312  * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
10313  * can also be read from ChipCommon Registers.
10314  */
10315 static int
10316 dhdpcie_sromotp_customvar(dhd_bus_t *bus, uint32 *customvar1, uint32 *customvar2)
10317 {
10318 	uint16 dump_offset = 0;
10319 	uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
10320 	/* Table for 65nm OTP Size (in bits) */
10321 	int  otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
10322 	volatile uint16 *nvm_shadow;
10323 	uint cur_coreid;
10324 	uint chipc_corerev;
10325 	chipcregs_t *chipcregs;
10326 	uint16 *otp_dump;
10327 	uint8 *cis;
10328 	uint8 tup, tlen;
10329 	int i = 0;
10330 
10331 	/* Save the current core */
10332 	cur_coreid = si_coreid(bus->sih);
10333 	/* Switch to ChipC */
10334 	chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
10335 	ASSERT(chipcregs != NULL);
10336 	chipc_corerev = si_corerev(bus->sih);
10337 	/* Check ChipcommonCore Rev */
10338 	if (chipc_corerev < 44) {
10339 		DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
10340 		return BCME_UNSUPPORTED;
10341 	}
10342 	/* Check ChipID */
10343 	if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) &&
10344 		((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
10345 		((uint16)bus->sih->chip != BCM4359_CHIP_ID) &&
10346 		((uint16)bus->sih->chip != BCM4349_CHIP_ID)) {
10347 		DHD_ERROR(("%s: supported for chips "
10348 				"4350/4345/4355/4349/4359 only\n", __FUNCTION__));
10349 		return BCME_UNSUPPORTED;
10350 	}
10351 	/* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
10352 	if (chipcregs->sromcontrol & SRC_PRESENT) {
10353 		/* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
10354 		sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
10355 				>> SRC_SIZE_SHIFT))) * 1024;
10356 		DHD_TRACE(("\nSPROM Present (Size %d bits)\n", sprom_size));
10357 	}
10358 	if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
10359 		DHD_TRACE(("\nOTP Present"));
10360 		if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
10361 				== OTPL_WRAP_TYPE_40NM) {
10362 			/* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
10363 			/* Chipcommon rev51 is a variation on rev45 and does not support
10364 			* the latest OTP configuration.
10365 			*/
10366 			if (chipc_corerev != 51 && chipc_corerev >= 49) {
10367 				otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
10368 					>> OTPL_ROW_SIZE_SHIFT) + 1) * 1024;
10369 				DHD_TRACE(("(Size %d bits)\n", otp_size));
10370 			} else {
10371 				otp_size =  (((chipcregs->capabilities & CC_CAP_OTPSIZE)
10372 						>> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
10373 				DHD_TRACE(("(Size %d bits)\n", otp_size));
10374 			}
10375 		} else {
10376 			/* This part is untested since newer chips have 40nm OTP */
10377 			/* Chipcommon rev51 is a variation on rev45 and does not support
10378 			* the latest OTP configuration.
10379 			*/
10380 				if (chipc_corerev != 51 && chipc_corerev >= 49) {
10381 					otp_size = otp_size_65nm[(chipcregs->otplayout &
10382 							OTPL_ROW_SIZE_MASK) >> OTPL_ROW_SIZE_SHIFT];
10383 					DHD_TRACE(("(Size %d bits)\n", otp_size));
10384 				} else {
10385 					otp_size = otp_size_65nm[(chipcregs->capabilities &
10386 							CC_CAP_OTPSIZE)	>> CC_CAP_OTPSIZE_SHIFT];
10387 					DHD_TRACE(("(Size %d bits)\n", otp_size));
10388 					DHD_TRACE(("%s: 65nm/130nm OTP Size not tested. \n",
10389 							__FUNCTION__));
10390 				}
10391 		}
10392 	}
10393 	/* Chipcommon rev51 is a variation on rev45 and does not support
10394 	* the latest OTP configuration.
10395 	*/
10396 	if (chipc_corerev != 51 && chipc_corerev >= 49) {
10397 		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
10398 				((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
10399 			DHD_ERROR(("%s: SPROM and OTP could not be found "
10400 					"sromcontrol = %x, otplayout = %x \n",
10401 					__FUNCTION__, chipcregs->sromcontrol,
10402 					chipcregs->otplayout));
10403 			return BCME_NOTFOUND;
10404 		}
10405 	} else {
10406 		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
10407 			((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
10408 			DHD_ERROR(("%s: SPROM and OTP could not be found "
10409 					"sromcontrol = %x, capabilities = %x \n",
10410 					__FUNCTION__, chipcregs->sromcontrol,
10411 					chipcregs->capabilities));
10412 			return BCME_NOTFOUND;
10413 		}
10414 	}
10415 	/* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
10416 	if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) &&
10417 			(chipcregs->sromcontrol & SRC_OTPPRESENT)) {
10418 		DHD_TRACE(("OTP Strap selected.\n"
10419 				"\nOTP Shadow in ChipCommon:\n"));
10420 		dump_size = otp_size / 16 ; /* 16bit words */
10421 	} else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
10422 			(chipcregs->sromcontrol & SRC_PRESENT)) {
10423 		DHD_TRACE(("SPROM Strap selected\n"
10424 				"\nSPROM Shadow in ChipCommon:\n"));
10425 		/* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
10426 		/* dump_size in 16bit words */
10427 		dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
10428 	} else {
10429 		DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
10430 				__FUNCTION__));
10431 		return BCME_NOTFOUND;
10432 	}
10433 	if (bus->regs == NULL) {
10434 		DHD_ERROR(("ChipCommon Regs. not initialized\n"));
10435 		return BCME_NOTREADY;
10436 	} else {
10437 		/* Chipcommon rev51 is a variation on rev45 and does not support
10438 		* the latest OTP configuration.
10439 		*/
10440 		if (chipc_corerev != 51 && chipc_corerev >= 49) {
10441 			/* Chip common can read only 8kbits,
10442 			* for ccrev >= 49 otp size is around 12 kbits so use GCI core
10443 			*/
10444 			nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
10445 		}  else {
10446 			/* Point to the SPROM/OTP shadow in ChipCommon */
10447 			nvm_shadow = chipcregs->sromotp;
10448 		}
10449 		if (nvm_shadow == NULL) {
10450 			DHD_ERROR(("%s: NVM Shadow is not initialized\n", __FUNCTION__));
10451 			return BCME_NOTFOUND;
10452 		}
10453 		otp_dump = kzalloc(dump_size*2, GFP_KERNEL);
10454 		if (otp_dump == NULL) {
10455 			DHD_ERROR(("%s: Insufficient system memory of size %d\n",
10456 				__FUNCTION__, dump_size));
10457 			return BCME_NOMEM;
10458 		}
10459 		/*
10460 		* Read 16 bits / iteration.
10461 		* dump_size & dump_offset in 16-bit words
10462 		*/
10463 		while (dump_offset < dump_size) {
10464 			*(otp_dump + dump_offset) = *(nvm_shadow + dump_offset);
10465 			dump_offset += 0x1;
10466 		}
10467 		/* Read from cis tuple start address */
10468 		cis = (uint8 *)otp_dump + CISTPL_OFFSET;
10469 		/* parse value of customvar2 tuple */
10470 		do {
10471 			tup = cis[i++];
10472 			if (tup == CISTPL_NULL || tup == CISTPL_END)
10473 				tlen = 0;
10474 			else
10475 				tlen = cis[i++];
10476 			if ((i + tlen) >= dump_size*2)
10477 				break;
10478 			switch (tup) {
10479 				case CISTPL_BRCM_HNBU:
10480 				switch (cis[i]) {
10481 					case HNBU_CUSTOM1:
10482 						*customvar1 = ((cis[i + 4] << 24) +
10483 								(cis[i + 3] << 16) +
10484 								(cis[i + 2] << 8) +
10485 								cis[i + 1]);
10486 						DHD_TRACE(("%s : customvar1 [%x]\n",
10487 								__FUNCTION__, *customvar1));
10488 						break;
10489 					case HNBU_CUSTOM2:
10490 						*customvar2 = ((cis[i + 4] << 24) +
10491 								(cis[i + 3] << 16) +
10492 								(cis[i + 2] << 8) +
10493 								cis[i + 1]);
10494 						DHD_TRACE(("%s : customvar2 [%x]\n",
10495 							__FUNCTION__, *customvar2));
10496 						break;
10497 					default:
10498 						break;
10499 				}
10500 					break;
10501 				default:
10502 					break;
10503 			}
10504 			i += tlen;
10505 		} while (tup != 0xff);
10506 
10507 		if (otp_dump) {
10508 			kfree(otp_dump);
10509 			otp_dump = NULL;
10510 		}
10511 	}
10512 	/* Switch back to the original core */
10513 	si_setcore(bus->sih, cur_coreid, 0);
10514 	return BCME_OK;
10515 } /* dhdpcie_sromotp_customvar */
10516 
10517 /**
10518  * Name:  dhdpcie_cc_nvmshadow
10519  *
10520  * Description:
10521  * A shadow of OTP/SPROM exists in ChipCommon Region
10522  * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
10523  * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
10524  * can also be read from ChipCommon Registers.
10525  */
10526 static int
10527 dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
10528 {
10529 	uint16 dump_offset = 0;
10530 	uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
10531 
10532 	/* Table for 65nm OTP Size (in bits) */
10533 	int  otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
10534 
10535 	volatile uint16 *nvm_shadow;
10536 
10537 	uint cur_coreid;
10538 	uint chipc_corerev;
10539 	chipcregs_t *chipcregs;
10540 
10541 	/* Save the current core */
10542 	cur_coreid = si_coreid(bus->sih);
10543 	/* Switch to ChipC */
10544 	chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
10545 	ASSERT(chipcregs != NULL);
10546 
10547 	chipc_corerev = si_corerev(bus->sih);
10548 
10549 	/* Check ChipcommonCore Rev */
10550 	if (chipc_corerev < 44) {
10551 		DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
10552 		return BCME_UNSUPPORTED;
10553 	}
10554 
10555 	/* Check ChipID */
10556 	if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) &&
10557 	        ((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
10558 	        ((uint16)bus->sih->chip != BCM4364_CHIP_ID)) {
10559 		DHD_ERROR(("%s: cc_nvmdump cmd. supported for Olympic chips "
10560 					"4350/4345/4355/4364 only\n", __FUNCTION__));
10561 		return BCME_UNSUPPORTED;
10562 	}
10563 
10564 	/* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
10565 	if (chipcregs->sromcontrol & SRC_PRESENT) {
10566 		/* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
10567 		sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
10568 					>> SRC_SIZE_SHIFT))) * 1024;
10569 		bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
10570 	}
10571 
10572 	if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
10573 		bcm_bprintf(b, "\nOTP Present");
10574 
10575 		if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
10576 			== OTPL_WRAP_TYPE_40NM) {
10577 			/* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
10578 			/* Chipcommon rev51 is a variation on rev45 and does not support
10579 			 * the latest OTP configuration.
10580 			 */
10581 			if (chipc_corerev != 51 && chipc_corerev >= 49) {
10582 				otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
10583 					>> OTPL_ROW_SIZE_SHIFT) + 1) * 1024;
10584 				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
10585 			} else {
10586 				otp_size =  (((chipcregs->capabilities & CC_CAP_OTPSIZE)
10587 				        >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
10588 				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
10589 			}
10590 		} else {
10591 			/* This part is untested since newer chips have 40nm OTP */
10592 			/* Chipcommon rev51 is a variation on rev45 and does not support
10593 			 * the latest OTP configuration.
10594 			 */
10595 			if (chipc_corerev != 51 && chipc_corerev >= 49) {
10596 				otp_size = otp_size_65nm[(chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
10597 						>> OTPL_ROW_SIZE_SHIFT];
10598 				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
10599 			} else {
10600 				otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
10601 					        >> CC_CAP_OTPSIZE_SHIFT];
10602 				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
10603 				DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
10604 					__FUNCTION__));
10605 			}
10606 		}
10607 	}
10608 
10609 	/* Chipcommon rev51 is a variation on rev45 and does not support
10610 	 * the latest OTP configuration.
10611 	 */
10612 	if (chipc_corerev != 51 && chipc_corerev >= 49) {
10613 		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
10614 			((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
10615 			DHD_ERROR(("%s: SPROM and OTP could not be found "
10616 				"sromcontrol = %x, otplayout = %x \n",
10617 				__FUNCTION__, chipcregs->sromcontrol, chipcregs->otplayout));
10618 			return BCME_NOTFOUND;
10619 		}
10620 	} else {
10621 		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
10622 			((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
10623 			DHD_ERROR(("%s: SPROM and OTP could not be found "
10624 				"sromcontrol = %x, capabilities = %x \n",
10625 				__FUNCTION__, chipcregs->sromcontrol, chipcregs->capabilities));
10626 			return BCME_NOTFOUND;
10627 		}
10628 	}
10629 
10630 	/* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
10631 	if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) &&
10632 		(chipcregs->sromcontrol & SRC_OTPPRESENT)) {
10633 
10634 		bcm_bprintf(b, "OTP Strap selected.\n"
10635 		               "\nOTP Shadow in ChipCommon:\n");
10636 
10637 		dump_size = otp_size / 16 ; /* 16bit words */
10638 
10639 	} else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
10640 		(chipcregs->sromcontrol & SRC_PRESENT)) {
10641 
10642 		bcm_bprintf(b, "SPROM Strap selected\n"
10643 				"\nSPROM Shadow in ChipCommon:\n");
10644 
10645 		/* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
10646 		/* dump_size in 16bit words */
10647 		dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
10648 	} else {
10649 		DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
10650 			__FUNCTION__));
10651 		return BCME_NOTFOUND;
10652 	}
10653 
10654 	if (bus->regs == NULL) {
10655 		DHD_ERROR(("ChipCommon Regs. not initialized\n"));
10656 		return BCME_NOTREADY;
10657 	} else {
10658 		bcm_bprintf(b, "\n OffSet:");
10659 
10660 		/* Chipcommon rev51 is a variation on rev45 and does not support
10661 		 * the latest OTP configuration.
10662 		 */
10663 		if (chipc_corerev != 51 && chipc_corerev >= 49) {
10664 			/* Chip common can read only 8kbits,
10665 			* for ccrev >= 49 otp size is around 12 kbits so use GCI core
10666 			*/
10667 			nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
10668 		} else {
10669 			/* Point to the SPROM/OTP shadow in ChipCommon */
10670 			nvm_shadow = chipcregs->sromotp;
10671 		}
10672 
10673 		if (nvm_shadow == NULL) {
10674 			DHD_ERROR(("%s: NVM Shadow is not initialized\n", __FUNCTION__));
10675 			return BCME_NOTFOUND;
10676 		}
10677 
10678 		/*
10679 		* Read 16 bits / iteration.
10680 		* dump_size & dump_offset in 16-bit words
10681 		*/
10682 		while (dump_offset < dump_size) {
10683 			if (dump_offset % 2 == 0)
10684 				/* Print the offset in the shadow space in Bytes */
10685 				bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);
10686 
10687 			bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
10688 			dump_offset += 0x1;
10689 		}
10690 	}
10691 
10692 	/* Switch back to the original core */
10693 	si_setcore(bus->sih, cur_coreid, 0);
10694 
10695 	return BCME_OK;
10696 } /* dhdpcie_cc_nvmshadow */
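
/*
 * Example of the dump format produced above (values are illustrative): a
 * byte offset is printed every second 16-bit word, so each output line is
 * one offset followed by two words:
 *
 *    OffSet:
 *    0x0000	0x2801	0x0000
 *    0x0004	0x0a00	0x8698
 */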
10697 
10698 /** Flow rings are dynamically created and destroyed */
10699 void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
10700 {
10701 	void *pkt;
10702 	flow_queue_t *queue;
10703 	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node;
10704 	unsigned long flags;
10705 
10706 	queue = &flow_ring_node->queue;
10707 
10708 #ifdef DHDTCPACK_SUPPRESS
10709 	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
10710 	 * when there is a newly coming packet from network stack.
10711 	 */
10712 	dhd_tcpack_info_tbl_clean(bus->dhd);
10713 #endif /* DHDTCPACK_SUPPRESS */
10714 
10715 #ifdef DHD_HP2P
10716 	if (flow_ring_node->hp2p_ring) {
10717 		bus->dhd->hp2p_ring_active = FALSE;
10718 		flow_ring_node->hp2p_ring = FALSE;
10719 	}
10720 #endif /* DHD_HP2P */
10721 
10722 	/* clean up BUS level info */
10723 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
10724 
10725 	/* Flush all pending packets in the queue, if any */
10726 	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
10727 		PKTFREE(bus->dhd->osh, pkt, TRUE);
10728 	}
10729 	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
10730 
10731 	/* Reinitialise flowring's queue */
10732 	dhd_flow_queue_reinit(bus->dhd, queue, FLOW_RING_QUEUE_THRESHOLD);
10733 	flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
10734 	flow_ring_node->active = FALSE;
10735 
10736 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
10737 
10738 	/* Hold flowring_list_lock to ensure no race condition while accessing the List */
10739 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
10740 	dll_delete(&flow_ring_node->list);
10741 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
10742 
10743 	/* Release the flowring object back into the pool */
10744 	dhd_prot_flowrings_pool_release(bus->dhd,
10745 		flow_ring_node->flowid, flow_ring_node->prot_info);
10746 
10747 	/* Free the flowid back to the flowid allocator */
10748 	dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
10749 	                flow_ring_node->flowid);
10750 }
10751 
10752 /**
10753  * Allocate a Flow ring buffer,
10754  * Init Ring buffer, send Msg to device about flow ring creation
10755 */
10756 int
10757 dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg)
10758 {
10759 	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
10760 
10761 	DHD_INFO(("%s :Flow create\n", __FUNCTION__));
10762 
10763 	/* Send Msg to device about flow ring creation */
10764 	if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK)
10765 		return BCME_NOMEM;
10766 
10767 	return BCME_OK;
10768 }
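
/*
 * Flow ring lifecycle, as driven by the request/response pairs in this file
 * (state names are the FLOW_RING_STATUS_* values):
 *
 *   create request -> dongle response -> OPEN, node put on the active list
 *   flush request  -> FLUSH_PENDING   -> response -> OPEN
 *   delete request -> DELETE_PENDING  -> response -> dhd_bus_clean_flow_ring()
 *                                        (queue drained, CLOSED, flowid freed)
 */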
10769 
10770 /** Handle response from dongle on a 'flow ring create' request */
10771 void
10772 dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
10773 {
10774 	flow_ring_node_t *flow_ring_node;
10775 	unsigned long flags;
10776 
10777 	DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
10778 
10779 	/* Boundary check of the flowid */
10780 	if (flowid >= bus->dhd->num_flow_rings) {
10781 		DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
10782 			flowid, bus->dhd->num_flow_rings));
10783 		return;
10784 	}
10785 
10786 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
10787 	if (!flow_ring_node) {
10788 		DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
10789 		return;
10790 	}
10791 
10792 	ASSERT(flow_ring_node->flowid == flowid);
10793 	if (flow_ring_node->flowid != flowid) {
10794 		DHD_ERROR(("%s: flowid %d is different from the flowid "
10795 			"of the flow_ring_node %d\n", __FUNCTION__, flowid,
10796 			flow_ring_node->flowid));
10797 		return;
10798 	}
10799 
10800 	if (status != BCME_OK) {
10801 		DHD_ERROR(("%s Flow create Response failure error status = %d \n",
10802 		     __FUNCTION__, status));
10803 		/* Call Flow clean up */
10804 		dhd_bus_clean_flow_ring(bus, flow_ring_node);
10805 		return;
10806 	}
10807 
10808 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
10809 	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
10810 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
10811 
10812 	/* Now add the flow ring node to the active list.
10813 	 * This code used to live in dhd_flowid_lookup, where the node was
10814 	 * added to the active list before dhd_prot_flow_ring_create had
10815 	 * finished filling in its contents. If a D2H interrupt arrived in
10816 	 * that window, the bottom half would call dhd_update_txflowrings,
10817 	 * which walks the active flow ring list, picks up the nodes and
10818 	 * operates on them. Since dhd_prot_flow_ring_create was not yet
10819 	 * finished, the contents of flow_ring_node could still be NULL,
10820 	 * leading to crashes. Hence the flow_ring_node is added to the
10821 	 * active list only after it is truly created, i.e. after the
10822 	 * create response message is received from the dongle.
10823 	 */
10828 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
10829 	dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
10830 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
10831 
10832 	dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
10833 
10834 	return;
10835 }
10836 
10837 int
10838 dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
10839 {
10840 	void * pkt;
10841 	flow_queue_t *queue;
10842 	flow_ring_node_t *flow_ring_node;
10843 	unsigned long flags;
10844 
10845 	DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
10846 
10847 	flow_ring_node = (flow_ring_node_t *)arg;
10848 
10849 #ifdef DHDTCPACK_SUPPRESS
10850 	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
10851 	 * when there is a newly coming packet from network stack.
10852 	 */
10853 	dhd_tcpack_info_tbl_clean(bus->dhd);
10854 #endif /* DHDTCPACK_SUPPRESS */
10855 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
10856 	if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) {
10857 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
10858 		DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__, flow_ring_node->flowid));
10859 		return BCME_ERROR;
10860 	}
10861 	flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
10862 
10863 	queue = &flow_ring_node->queue; /* queue associated with flow ring */
10864 
10865 	/* Flush all pending packets in the queue, if any */
10866 	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
10867 		PKTFREE(bus->dhd->osh, pkt, TRUE);
10868 	}
10869 	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
10870 
10871 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
10872 
10873 	/* Send Msg to device about flow ring deletion */
10874 	dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);
10875 
10876 	return BCME_OK;
10877 }
10878 
10879 void
10880 dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
10881 {
10882 	flow_ring_node_t *flow_ring_node;
10883 
10884 	DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
10885 
10886 	/* Boundary check of the flowid */
10887 	if (flowid >= bus->dhd->num_flow_rings) {
10888 		DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
10889 			flowid, bus->dhd->num_flow_rings));
10890 		return;
10891 	}
10892 
10893 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
10894 	if (!flow_ring_node) {
10895 		DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
10896 		return;
10897 	}
10898 
10899 	ASSERT(flow_ring_node->flowid == flowid);
10900 	if (flow_ring_node->flowid != flowid) {
10901 		DHD_ERROR(("%s: flowid %d is different from the flowid "
10902 			"of the flow_ring_node %d\n", __FUNCTION__, flowid,
10903 			flow_ring_node->flowid));
10904 		return;
10905 	}
10906 
10907 	if (status != BCME_OK) {
10908 		DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
10909 		    __FUNCTION__, status));
10910 		return;
10911 	}
10912 	/* Call Flow clean up */
10913 	dhd_bus_clean_flow_ring(bus, flow_ring_node);
10914 
10915 	return;
10916 
10917 }
10918 
10919 int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
10920 {
10921 	void *pkt;
10922 	flow_queue_t *queue;
10923 	flow_ring_node_t *flow_ring_node;
10924 	unsigned long flags;
10925 
10926 	DHD_INFO(("%s :Flow Flush\n", __FUNCTION__));
10927 
10928 	flow_ring_node = (flow_ring_node_t *)arg;
10929 
10930 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
10931 	queue = &flow_ring_node->queue; /* queue associated with flow ring */
10932 	/* Flow ring status will be set back to FLOW_RING_STATUS_OPEN
10933 	 * once flow ring flush response is received for this flowring node.
10934 	 */
10935 	flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;
10936 
10937 #ifdef DHDTCPACK_SUPPRESS
10938 	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
10939 	 * when there is a newly coming packet from network stack.
10940 	 */
10941 	dhd_tcpack_info_tbl_clean(bus->dhd);
10942 #endif /* DHDTCPACK_SUPPRESS */
10943 
10944 	/* Flush all pending packets in the queue, if any */
10945 	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
10946 		PKTFREE(bus->dhd->osh, pkt, TRUE);
10947 	}
10948 	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
10949 
10950 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
10951 
10952 	/* Send Msg to device about flow ring flush */
10953 	dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);
10954 
10955 	return BCME_OK;
10956 }
10957 
10958 void
10959 dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
10960 {
10961 	flow_ring_node_t *flow_ring_node;
10962 
10963 	if (status != BCME_OK) {
10964 		DHD_ERROR(("%s Flow flush Response failure error status = %d \n",
10965 		    __FUNCTION__, status));
10966 		return;
10967 	}
10968 
10969 	/* Boundary check of the flowid */
10970 	if (flowid >= bus->dhd->num_flow_rings) {
10971 		DHD_ERROR(("%s: flowid is invalid %d, max %d\n", __FUNCTION__,
10972 			flowid, bus->dhd->num_flow_rings));
10973 		return;
10974 	}
10975 
10976 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
10977 	if (!flow_ring_node) {
10978 		DHD_ERROR(("%s: flow_ring_node is NULL\n", __FUNCTION__));
10979 		return;
10980 	}
10981 
10982 	ASSERT(flow_ring_node->flowid == flowid);
10983 	if (flow_ring_node->flowid != flowid) {
10984 		DHD_ERROR(("%s: flowid %d is different from the flowid "
10985 			"of the flow_ring_node %d\n", __FUNCTION__, flowid,
10986 			flow_ring_node->flowid));
10987 		return;
10988 	}
10989 
10990 	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
10991 	return;
10992 }
10993 
10994 uint32
10995 dhd_bus_max_h2d_queues(struct dhd_bus *bus)
10996 {
10997 	return bus->max_submission_rings;
10998 }
10999 
11000 /* To be symmetric with SDIO */
11001 void
11002 dhd_bus_pktq_flush(dhd_pub_t *dhdp)
11003 {
11004 	return;
11005 }
11006 
11007 void
11008 dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val)
11009 {
11010 	dhdp->bus->is_linkdown = val;
11011 }
11012 
11013 int
11014 dhd_bus_get_linkdown(dhd_pub_t *dhdp)
11015 {
11016 	return dhdp->bus->is_linkdown;
11017 }
11018 
11019 int
11020 dhd_bus_get_cto(dhd_pub_t *dhdp)
11021 {
11022 	return dhdp->bus->cto_triggered;
11023 }
11024 
11025 #ifdef IDLE_TX_FLOW_MGMT
11026 /* resume request */
11027 int
11028 dhd_bus_flow_ring_resume_request(dhd_bus_t *bus, void *arg)
11029 {
11030 	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
11031 
11032 	DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__, flow_ring_node->flowid));
11033 
11034 	flow_ring_node->status = FLOW_RING_STATUS_RESUME_PENDING;
11035 
11036 	/* Send Msg to device about flow ring resume */
11037 	dhd_prot_flow_ring_resume(bus->dhd, flow_ring_node);
11038 
11039 	return BCME_OK;
11040 }
11041 
11042 /* add the node back to active flowring */
11043 void
11044 dhd_bus_flow_ring_resume_response(dhd_bus_t *bus, uint16 flowid, int32 status)
11045 {
11046 
11047 	flow_ring_node_t *flow_ring_node;
11048 
11049 	DHD_TRACE(("%s :flowid %d \n", __FUNCTION__, flowid));
11050 
11051 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
11052 	ASSERT(flow_ring_node->flowid == flowid);
11053 
11054 	if (status != BCME_OK) {
11055 		DHD_ERROR(("%s Error Status = %d \n",
11056 			__FUNCTION__, status));
11057 		return;
11058 	}
11059 
11060 	DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n",
11061 		__FUNCTION__, flow_ring_node->flowid,  flow_ring_node->queue.len));
11062 
11063 	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
11064 
11065 	dhd_bus_schedule_queue(bus, flowid, FALSE);
11066 	return;
11067 }
11068 
11069 /* scan the flow rings in active list for idle time out */
11070 void
11071 dhd_bus_check_idle_scan(dhd_bus_t *bus)
11072 {
11073 	uint64 time_stamp; /* in millisec */
11074 	uint64 diff;
11075 
11076 	time_stamp = OSL_SYSUPTIME();
11077 	diff = time_stamp - bus->active_list_last_process_ts;
11078 
11079 	if (diff > IDLE_FLOW_LIST_TIMEOUT) {
11080 		dhd_bus_idle_scan(bus);
11081 		bus->active_list_last_process_ts = OSL_SYSUPTIME();
11082 	}
11083 
11084 	return;
11085 }
11086 
11087 /* scan the nodes in active list till it finds a non idle node */
11088 void
11089 dhd_bus_idle_scan(dhd_bus_t *bus)
11090 {
11091 	dll_t *item, *prev;
11092 	flow_ring_node_t *flow_ring_node;
11093 	uint64 time_stamp, diff;
11094 	unsigned long flags;
11095 	uint16 ringid[MAX_SUSPEND_REQ];
11096 	uint16 count = 0;
11097 
11098 	time_stamp = OSL_SYSUPTIME();
11099 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
11100 
11101 	for (item = dll_tail_p(&bus->flowring_active_list);
11102 	         !dll_end(&bus->flowring_active_list, item); item = prev) {
11103 		prev = dll_prev_p(item);
11104 
11105 		flow_ring_node = dhd_constlist_to_flowring(item);
11106 
11107 		if (flow_ring_node->flowid == (bus->max_submission_rings - 1))
11108 			continue;
11109 
11110 		if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
11111 			/* Takes care of deleting zombie rings */
11112 			/* delete from the active list */
11113 			DHD_INFO(("deleting flow id %u from active list\n",
11114 				flow_ring_node->flowid));
11115 			__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
11116 			continue;
11117 		}
11118 
11119 		diff = time_stamp - flow_ring_node->last_active_ts;
11120 
11121 		if ((diff > IDLE_FLOW_RING_TIMEOUT) && !(flow_ring_node->queue.len))  {
11122 			DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node->flowid));
11123 			/* delete from the active list */
11124 			__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
11125 			flow_ring_node->status = FLOW_RING_STATUS_SUSPENDED;
11126 			ringid[count] = flow_ring_node->flowid;
11127 			count++;
11128 			if (count == MAX_SUSPEND_REQ) {
11129 				/* create a batch message now!! */
11130 				dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
11131 				count = 0;
11132 			}
11133 
11134 		} else {
11135 
11136 			/* No more scanning, break from here! */
11137 			break;
11138 		}
11139 	}
11140 
11141 	if (count) {
11142 		dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
11143 	}
11144 
11145 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
11146 
11147 	return;
11148 }
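
/*
 * Design note: the active list is kept in most-recently-used order --
 * dhd_flow_ring_move_to_active_list_head() below re-prepends a node whenever
 * its ring sees traffic -- so the scan above walks from the tail and can stop
 * at the first non-idle node instead of visiting the whole list. Suspend
 * requests are batched into one message per MAX_SUSPEND_REQ rings:
 *
 *   ringid[count++] = flow_ring_node->flowid;
 *   if (count == MAX_SUSPEND_REQ) {
 *           dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
 *           count = 0;
 *   }
 */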
11149 
11150 void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
11151 {
11152 	unsigned long flags;
11153 	dll_t* list;
11154 
11155 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
11156 	/* check if the node is already at head, otherwise delete it and prepend */
11157 	list = dll_head_p(&bus->flowring_active_list);
11158 	if (&flow_ring_node->list != list) {
11159 		dll_delete(&flow_ring_node->list);
11160 		dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
11161 	}
11162 
11163 	/* update flow ring timestamp */
11164 	flow_ring_node->last_active_ts = OSL_SYSUPTIME();
11165 
11166 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
11167 
11168 	return;
11169 }
11170 
11171 void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
11172 {
11173 	unsigned long flags;
11174 
11175 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
11176 
11177 	dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
11178 	/* update flow ring timestamp */
11179 	flow_ring_node->last_active_ts = OSL_SYSUPTIME();
11180 
11181 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
11182 
11183 	return;
11184 }
11185 void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
11186 {
11187 	dll_delete(&flow_ring_node->list);
11188 }
11189 
11190 void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
11191 {
11192 	unsigned long flags;
11193 
11194 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
11195 
11196 	__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
11197 
11198 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
11199 
11200 	return;
11201 }
11202 #endif /* IDLE_TX_FLOW_MGMT */
11203 
11204 int
11205 dhdpcie_bus_clock_start(struct dhd_bus *bus)
11206 {
11207 	return dhdpcie_start_host_pcieclock(bus);
11208 }
11209 
11210 int
11211 dhdpcie_bus_clock_stop(struct dhd_bus *bus)
11212 {
11213 	return dhdpcie_stop_host_pcieclock(bus);
11214 }
11215 
11216 int
11217 dhdpcie_bus_disable_device(struct dhd_bus *bus)
11218 {
11219 	return dhdpcie_disable_device(bus);
11220 }
11221 
11222 int
11223 dhdpcie_bus_enable_device(struct dhd_bus *bus)
11224 {
11225 	return dhdpcie_enable_device(bus);
11226 }
11227 
11228 int
11229 dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
11230 {
11231 	return dhdpcie_alloc_resource(bus);
11232 }
11233 
11234 void
11235 dhdpcie_bus_free_resource(struct dhd_bus *bus)
11236 {
11237 	dhdpcie_free_resource(bus);
11238 }
11239 
11240 int
11241 dhd_bus_request_irq(struct dhd_bus *bus)
11242 {
11243 	return dhdpcie_bus_request_irq(bus);
11244 }
11245 
11246 bool
11247 dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
11248 {
11249 	return dhdpcie_dongle_attach(bus);
11250 }
11251 
11252 int
11253 dhd_bus_release_dongle(struct dhd_bus *bus)
11254 {
11255 	bool dongle_isolation;
11256 	osl_t *osh;
11257 
11258 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
11259 
11260 	if (bus) {
11261 		osh = bus->osh;
11262 		ASSERT(osh);
11263 
11264 		if (bus->dhd) {
11265 #if defined(DEBUGGER) || defined(DHD_DSCOPE)
11266 			debugger_close();
11267 #endif /* DEBUGGER || DHD_DSCOPE */
11268 
11269 			dongle_isolation = bus->dhd->dongle_isolation;
11270 			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
11271 		}
11272 	}
11273 
11274 	return 0;
11275 }
11276 
11277 int
11278 dhdpcie_cto_cfg_init(struct dhd_bus *bus, bool enable)
11279 {
11280 	uint32 val;
11281 	if (enable) {
11282 		dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4,
11283 			PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR);
11284 		val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
11285 		dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_BACKPLANE_EN);
11286 	} else {
11287 		dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0);
11288 		val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
11289 		dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_BACKPLANE_EN);
11290 	}
11291 	return 0;
11292 }
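
/*
 * CTO here refers to a PCIe completion timeout on the backplane. Beyond the
 * config-space masks set above, dhdpcie_cto_init() packs the recovery
 * parameters into the ctoctrl core register, roughly:
 *
 *   ctoctrl = ((threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
 *              PCIE_CTO_TO_THRESHHOLD_MASK) |
 *             ((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
 *              PCIE_CTO_CLKCHKCNT_MASK) |
 *             PCIE_CTO_ENAB_MASK;
 */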
11293 
11294 int
11295 dhdpcie_cto_init(struct dhd_bus *bus, bool enable)
11296 {
11297 	if (bus->sih->buscorerev < 19) {
11298 		DHD_INFO(("%s: Unsupported CTO, buscorerev=%d\n",
11299 			__FUNCTION__, bus->sih->buscorerev));
11300 		return BCME_UNSUPPORTED;
11301 	}
11302 
11303 	if (bus->sih->buscorerev == 19) {
11304 		uint32 pcie_lnkst;
11305 		si_corereg(bus->sih, bus->sih->buscoreidx,
11306 			OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS);
11307 
11308 		pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx,
11309 			OFFSETOF(sbpcieregs_t, configdata), 0, 0);
11310 
11311 		if (((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) &
11312 			PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1) {
11313 			return BCME_UNSUPPORTED;
11314 		}
11315 	}
11316 
11317 	bus->cto_enable = enable;
11318 
11319 	dhdpcie_cto_cfg_init(bus, enable);
11320 
11321 	if (enable) {
11322 		if (bus->cto_threshold == 0) {
11323 			bus->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT;
11324 		}
11325 		si_corereg(bus->sih, bus->sih->buscoreidx,
11326 			OFFSETOF(sbpcieregs_t, ctoctrl), ~0,
11327 			((bus->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
11328 			PCIE_CTO_TO_THRESHHOLD_MASK) |
11329 			((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
11330 			PCIE_CTO_CLKCHKCNT_MASK) |
11331 			PCIE_CTO_ENAB_MASK);
11332 	} else {
11333 		si_corereg(bus->sih, bus->sih->buscoreidx,
11334 			OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0);
11335 	}
11336 
11337 	DHD_ERROR(("%s: CTO prevention and recovery %s\n",
11338 		__FUNCTION__, bus->cto_enable ? "enabled" : "disabled"));
11339 
11340 	return 0;
11341 }
11342 
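/*
 * Recovery path after a CTO fires: mask further CTO interrupts, dump the
 * still-accessible DAR registers, assert the backplane reset via
 * SPROM_CFG_TO_SB_RST, poll the DAR error log until the CTO error bit
 * clears (bounded by CTO_TO_CLEAR_WAIT_MAX_CNT), then clear the interrupt
 * status and deassert the backplane reset.
 */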
11343 static int
11344 dhdpcie_cto_error_recovery(struct dhd_bus *bus)
11345 {
11346 	uint32 pci_intmask, err_status;
11347 	uint8 i = 0;
11348 	uint32 val;
11349 
11350 	pci_intmask = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_MASK, 4);
11351 	dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, pci_intmask & ~PCI_CTO_INT_MASK);
11352 
11353 	DHD_OS_WAKE_LOCK(bus->dhd);
11354 
11355 	DHD_ERROR(("--- CTO Triggered --- %d\n", bus->pwr_req_ref));
11356 
11357 	/*
11358 	 * DAR still accessible
11359 	 */
11360 	dhd_bus_dump_dar_registers(bus);
11361 
11362 	/* reset backplane */
11363 	val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
11364 	dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val | SPROM_CFG_TO_SB_RST);
11365 
11366 	/* clear timeout error */
11367 	while (1) {
11368 		err_status =  si_corereg(bus->sih, bus->sih->buscoreidx,
11369 			DAR_ERRLOG(bus->sih->buscorerev),
11370 			0, 0);
11371 		if (err_status & PCIE_CTO_ERR_MASK) {
11372 			si_corereg(bus->sih, bus->sih->buscoreidx,
11373 					DAR_ERRLOG(bus->sih->buscorerev),
11374 					~0, PCIE_CTO_ERR_MASK);
11375 		} else {
11376 			break;
11377 		}
11378 		OSL_DELAY(CTO_TO_CLEAR_WAIT_MS * 1000);
11379 		i++;
11380 		if (i > CTO_TO_CLEAR_WAIT_MAX_CNT) {
11381 			DHD_ERROR(("cto recovery fail\n"));
11382 
11383 			DHD_OS_WAKE_UNLOCK(bus->dhd);
11384 			return BCME_ERROR;
11385 		}
11386 	}
11387 
11388 	/* clear interrupt status */
11389 	dhdpcie_bus_cfg_write_dword(bus, PCI_INT_STATUS, 4, PCI_CTO_INT_MASK);
11390 
11391 	/* Halt ARM & remove reset */
11392 	/* TBD: the ARM core could be halted here if required */
11393 
11394 	/* reset SPROM_CFG_TO_SB_RST */
11395 	val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
11396 
11397 	DHD_ERROR(("cto recovery reset 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
11398 		PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
11399 	dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, val & ~SPROM_CFG_TO_SB_RST);
11400 
11401 	val = dhdpcie_bus_cfg_read_dword(bus, PCI_SPROM_CONTROL, 4);
11402 	DHD_ERROR(("cto recovery success 0x%x:SPROM_CFG_TO_SB_RST(0x%x) 0x%x\n",
11403 		PCI_SPROM_CONTROL, SPROM_CFG_TO_SB_RST, val));
11404 
11405 	DHD_OS_WAKE_UNLOCK(bus->dhd);
11406 
11407 	return BCME_OK;
11408 }
11409 
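/* Keep a subsystem reset from forcing PCIe re-enumeration by setting the
 * corresponding disable bit in the subsystem control register.
 */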
11410 void
11411 dhdpcie_ssreset_dis_enum_rst(struct dhd_bus *bus)
11412 {
11413 	uint32 val;
11414 
11415 	val = dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4);
11416 	dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4,
11417 		val | (0x1 << PCIE_SSRESET_DIS_ENUM_RST_BIT));
11418 }
11419 
11420 #if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
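/*
 * If the dongle advertises D2H D11 TX status (PCIE_SHARED2_D2H_D11_TX_STATUS),
 * acknowledge it by setting the H2D counterpart bit in the shared flags2
 * word in dongle memory, so both sides agree on TX status reporting.
 */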
11421 static int
11422 dhdpcie_init_d11status(struct dhd_bus *bus)
11423 {
11424 	uint32 addr;
11425 	uint32 flags2;
11426 	int ret = 0;
11427 
11428 	if (bus->pcie_sh->flags2 & PCIE_SHARED2_D2H_D11_TX_STATUS) {
11429 		flags2 = bus->pcie_sh->flags2;
11430 		addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2);
11431 		flags2 |= PCIE_SHARED2_H2D_D11_TX_STATUS;
11432 		ret = dhdpcie_bus_membytes(bus, TRUE, addr,
11433 			(uint8 *)&flags2, sizeof(flags2));
11434 		if (ret < 0) {
11435 			DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n",
11436 				__FUNCTION__));
11437 			return ret;
11438 		}
11439 		bus->pcie_sh->flags2 = flags2;
11440 		bus->dhd->d11_tx_status = TRUE;
11441 	}
11442 	return ret;
11443 }
11444 
11445 #else
11446 static int
11447 dhdpcie_init_d11status(struct dhd_bus *bus)
11448 {
11449 	return 0;
11450 }
11451 #endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
11452 
11453 #ifdef BCMPCIE_OOB_HOST_WAKE
11454 int
11455 dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
11456 {
11457 	return dhdpcie_oob_intr_register(dhdp->bus);
11458 }
11459 
11460 void
11461 dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
11462 {
11463 	dhdpcie_oob_intr_unregister(dhdp->bus);
11464 }
11465 
11466 void
11467 dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
11468 {
11469 	dhdpcie_oob_intr_set(dhdp->bus, enable);
11470 }
11471 #endif /* BCMPCIE_OOB_HOST_WAKE */
11472 
11473 bool
11474 dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus)
11475 {
11476 	return bus->dhd->d2h_hostrdy_supported;
11477 }
11478 
11479 void
11480 dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr)
11481 {
11482 	dhd_bus_t *bus = pub->bus;
11483 	uint32	coreoffset = index << 12;
11484 	uint32	core_addr = SI_ENUM_BASE(bus->sih) + coreoffset;
11485 	uint32 value;
11486 
11487 	while (first_addr <= last_addr) {
11488 		core_addr = SI_ENUM_BASE(bus->sih) + coreoffset + first_addr;
11489 		if (serialized_backplane_access(bus, core_addr, 4, &value, TRUE) != BCME_OK) {
11490 			DHD_ERROR(("Invalid size/addr combination \n"));
11491 		}
11492 		DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr, value));
11493 		first_addr = first_addr + 4;
11494 	}
11495 }
11496 
11497 bool
11498 dhdpcie_bus_get_pcie_hwa_supported(dhd_bus_t *bus)
11499 {
11500 	if (!bus->dhd)
11501 		return FALSE;
11502 	else if (bus->hwa_enab_bmap) {
11503 		return bus->dhd->hwa_enable;
11504 	} else {
11505 		return FALSE;
11506 	}
11507 }
11508 
11509 bool
11510 dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus)
11511 {
11512 	if (!bus->dhd)
11513 		return FALSE;
11514 	else if (bus->idma_enabled) {
11515 		return bus->dhd->idma_enable;
11516 	} else {
11517 		return FALSE;
11518 	}
11519 }
11520 
11521 bool
11522 dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus)
11523 {
11524 	if (!bus->dhd)
11525 		return FALSE;
11526 	else if (bus->ifrm_enabled) {
11527 		return bus->dhd->ifrm_enable;
11528 	} else {
11529 		return FALSE;
11530 	}
11531 }
11532 
11533 bool
11534 dhdpcie_bus_get_pcie_dar_supported(dhd_bus_t *bus)
11535 {
11536 	if (!bus->dhd) {
11537 		return FALSE;
11538 	} else if (bus->dar_enabled) {
11539 		return bus->dhd->dar_enable;
11540 	} else {
11541 		return FALSE;
11542 	}
11543 }
11544 
11545 void
11546 dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option)
11547 {
11548 	DHD_ERROR(("ENABLING DW:%d\n", dw_option));
11549 	bus->dw_option = dw_option;
11550 }
11551 
11552 void
11553 dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
11554 {
11555 	trap_t *tr = &bus->dhd->last_trap_info;
11556 	bcm_bprintf(strbuf,
11557 		"\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
11558 		" lp 0x%x, rpc 0x%x"
11559 		"\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
11560 		"r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x, r8 0x%x, r9 0x%x, "
11561 		"r10 0x%x, r11 0x%x, r12 0x%x\n\n",
11562 		ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr),
11563 		ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc),
11564 		ltoh32(bus->pcie_sh->trap_addr),
11565 		ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3),
11566 		ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7),
11567 		ltoh32(tr->r8), ltoh32(tr->r9), ltoh32(tr->r10),
11568 		ltoh32(tr->r11), ltoh32(tr->r12));
11569 }
11570 
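/*
 * Serialized backplane read/write used by ioctl handlers. A sketch of a
 * typical 4-byte read (caller side, names illustrative):
 *
 *	uint val;
 *	if (dhd_bus_readwrite_bp_addr(dhdp, addr, sizeof(val), &val, TRUE) == 0) {
 *		// val now holds the backplane word at 'addr'
 *	}
 */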
11571 int
11572 dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read)
11573 {
11574 	int bcmerror = 0;
11575 	struct dhd_bus *bus = dhdp->bus;
11576 
11577 	if (serialized_backplane_access(bus, addr, size, data, read) != BCME_OK) {
11578 		DHD_ERROR(("Invalid size/addr combination \n"));
11579 		bcmerror = BCME_ERROR;
11580 	}
11581 
11582 	return bcmerror;
11583 }
11584 
11585 int
11586 dhd_get_idletime(dhd_pub_t *dhd)
11587 {
11588 	return dhd->bus->idletime;
11589 }
11590 
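/*
 * Small helper around serialized_backplane_access() for single-word
 * register reads/writes; delays 1us before the access and logs every
 * access (or the failing address) at error level.
 */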
11591 static INLINE void
11592 dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read)
11593 {
11594 	OSL_DELAY(1);
11595 	if (serialized_backplane_access(dhd->bus, addr, sizeof(uint), val, read) != BCME_OK) {
11596 		DHD_ERROR(("sbreg: Invalid uint addr: 0x%x \n", addr));
11597 	} else {
11598 		DHD_ERROR(("sbreg: addr:0x%x val:0x%x read:%d\n", addr, *val, read));
11599 	}
11600 	return;
11601 }
11602 
11603 #ifdef DHD_SSSR_DUMP
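/*
 * Dump a FIFO exposed through an address/data register pair: reset the
 * address register to offset 0, then read the data register one 32-bit
 * word at a time, fifo_size/4 times, into 'buf'.
 */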
11604 static int
11605 dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
11606 	uint addr_reg, uint data_reg)
11607 {
11608 	uint addr;
11609 	uint val = 0;
11610 	int i;
11611 
11612 	DHD_ERROR(("%s\n", __FUNCTION__));
11613 
11614 	if (!buf) {
11615 		DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
11616 		return BCME_ERROR;
11617 	}
11618 
11619 	if (!fifo_size) {
11620 		DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
11621 		return BCME_ERROR;
11622 	}
11623 
11624 	/* Set the base address offset to 0 */
11625 	addr = addr_reg;
11626 	val = 0;
11627 	dhd_sbreg_op(dhd, addr, &val, FALSE);
11628 
11629 	addr = data_reg;
11630 	/* Read 4 bytes at once and loop for fifo_size / 4 */
11631 	for (i = 0; i < fifo_size / 4; i++) {
11632 		if (serialized_backplane_access(dhd->bus, addr,
11633 				sizeof(uint), &val, TRUE) != BCME_OK) {
11634 			DHD_ERROR(("%s: error in serialized_backplane_access\n", __FUNCTION__));
11635 			return BCME_ERROR;
11636 		}
11637 		buf[i] = val;
11638 		OSL_DELAY(1);
11639 	}
11640 	return BCME_OK;
11641 }
11642 
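/*
 * Dump the digital-core save/restore memory. Depending on what the
 * sssr_reg_info advertises this is either a direct membytes read
 * (dig_mem_info), a word-by-word backplane walk with the VASIP clock
 * force-enabled, or (for chipcommon corerev 64/65) a read through the
 * sr_memrw address/data registers.
 */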
11643 static int
11644 dhdpcie_get_sssr_dig_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
11645 	uint addr_reg)
11646 {
11647 	uint addr;
11648 	uint val = 0;
11649 	int i;
11650 	si_t *sih = dhd->bus->sih;
11651 
11652 	DHD_ERROR(("%s\n", __FUNCTION__));
11653 
11654 	if (!buf) {
11655 		DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
11656 		return BCME_ERROR;
11657 	}
11658 
11659 	if (!fifo_size) {
11660 		DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
11661 		return BCME_ERROR;
11662 	}
11663 
11664 	if (addr_reg) {
11665 
11666 		if ((!dhd->sssr_reg_info.vasip_regs.vasip_sr_size) &&
11667 			dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) {
11668 			int err = dhdpcie_bus_membytes(dhd->bus, FALSE, addr_reg, (uint8 *)buf,
11669 				fifo_size);
11670 			if (err != BCME_OK) {
11671 				DHD_ERROR(("%s: Error reading dig dump from dongle !\n",
11672 					__FUNCTION__));
11673 			}
11674 		} else {
11675 			/* Check if vasip clk is disabled, if yes enable it */
11676 			addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
11677 			dhd_sbreg_op(dhd, addr, &val, TRUE);
11678 			if (!val) {
11679 				val = 1;
11680 				dhd_sbreg_op(dhd, addr, &val, FALSE);
11681 			}
11682 
11683 			addr = addr_reg;
11684 			/* Read 4 bytes at once and loop for fifo_size / 4 */
11685 			for (i = 0; i < fifo_size / 4; i++, addr += 4) {
11686 				if (serialized_backplane_access(dhd->bus, addr, sizeof(uint),
11687 					&val, TRUE) != BCME_OK) {
11688 					DHD_ERROR(("%s: Invalid uint addr: 0x%x \n", __FUNCTION__,
11689 						addr));
11690 					return BCME_ERROR;
11691 				}
11692 				buf[i] = val;
11693 				OSL_DELAY(1);
11694 			}
11695 		}
11696 	} else {
11697 		uint cur_coreid;
11698 		uint chipc_corerev;
11699 		chipcregs_t *chipcregs;
11700 
11701 		/* Save the current core */
11702 		cur_coreid = si_coreid(sih);
11703 
11704 		/* Switch to ChipC */
11705 		chipcregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
11706 
11707 		chipc_corerev = si_corerev(sih);
11708 
11709 		if ((chipc_corerev == 64) || (chipc_corerev == 65)) {
11710 			W_REG(si_osh(sih), &chipcregs->sr_memrw_addr, 0);
11711 
11712 			/* Read 4 bytes at once and loop for fifo_size / 4 */
11713 			for (i = 0; i < fifo_size / 4; i++) {
11714 				buf[i] = R_REG(si_osh(sih), &chipcregs->sr_memrw_data);
11715 				OSL_DELAY(1);
11716 			}
11717 		}
11718 
11719 		/* Switch back to the original core */
11720 		si_setcore(sih, cur_coreid, 0);
11721 	}
11722 
11723 	return BCME_OK;
11724 }
11725 
11726 #if defined(EWP_ETD_PRSRV_LOGS)
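/*
 * Extract preserved event logs from the extended trap data (ETD): locate
 * the TAG_TRAP_LOG_DATA TLV, bounds-check and read the
 * eventlog_trap_buf_info_t array from dongle RAM, then pull in and parse
 * each referenced log buffer in sequence-number order.
 */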
11727 void
11728 dhdpcie_get_etd_preserve_logs(dhd_pub_t *dhd,
11729 		uint8 *ext_trap_data, void *event_decode_data)
11730 {
11731 	hnd_ext_trap_hdr_t *hdr = NULL;
11732 	bcm_tlv_t *tlv;
11733 	eventlog_trapdata_info_t *etd_evtlog = NULL;
11734 	eventlog_trap_buf_info_t *evtlog_buf_arr = NULL;
11735 	uint arr_size = 0;
11736 	int i = 0;
11737 	int err = 0;
11738 	uint32 seqnum = 0;
11739 
11740 	if (!ext_trap_data || !event_decode_data || !dhd)
11741 		return;
11742 
11743 	if (!dhd->concise_dbg_buf)
11744 		return;
11745 
11746 	/* First word is original trap_data, skip */
11747 	ext_trap_data += sizeof(uint32);
11748 
11749 	hdr = (hnd_ext_trap_hdr_t *)ext_trap_data;
11750 	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_LOG_DATA);
11751 	if (tlv) {
11752 		uint32 baseaddr = 0;
11753 		uint32 endaddr = dhd->bus->dongle_ram_base + dhd->bus->ramsize - 4;
11754 
11755 		etd_evtlog = (eventlog_trapdata_info_t *)tlv->data;
11756 		DHD_ERROR(("%s: etd_evtlog tlv found, num_elements=%x; "
11757 			"seq_num=%x; log_arr_addr=%x\n", __FUNCTION__,
11758 			(etd_evtlog->num_elements),
11759 			ntoh32(etd_evtlog->seq_num), (etd_evtlog->log_arr_addr)));
11760 		arr_size = (uint32)sizeof(*evtlog_buf_arr) * (etd_evtlog->num_elements);
11761 		if (!arr_size) {
11762 			DHD_ERROR(("%s: num event logs is zero! \n", __FUNCTION__));
11763 			return;
11764 		}
11765 		evtlog_buf_arr = MALLOCZ(dhd->osh, arr_size);
11766 		if (!evtlog_buf_arr) {
11767 			DHD_ERROR(("%s: out of memory !\n", __FUNCTION__));
11768 			return;
11769 		}
11770 
11771 		/* boundary check */
11772 		baseaddr = etd_evtlog->log_arr_addr;
11773 		if ((baseaddr < dhd->bus->dongle_ram_base) ||
11774 			((baseaddr + arr_size) > endaddr)) {
11775 			DHD_ERROR(("%s: Error reading invalid address\n",
11776 				__FUNCTION__));
11777 			goto err;
11778 		}
11779 
11780 		/* read the eventlog_trap_buf_info_t array from dongle memory */
11781 		err = dhdpcie_bus_membytes(dhd->bus, FALSE,
11782 				(ulong)(etd_evtlog->log_arr_addr),
11783 				(uint8 *)evtlog_buf_arr, arr_size);
11784 		if (err != BCME_OK) {
11785 			DHD_ERROR(("%s: Error reading event log array from dongle !\n",
11786 				__FUNCTION__));
11787 			goto err;
11788 		}
11789 		/* ntoh is required only for seq_num: event logs from the info
11790 		 * ring are sent by the dongle in network byte order, and ETD
11791 		 * follows the same convention.
11792 		 */
11793 		seqnum = ntoh32(etd_evtlog->seq_num);
11794 		memset(dhd->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
11795 		for (i = 0; i < (etd_evtlog->num_elements); ++i) {
11796 			/* boundary check */
11797 			baseaddr = evtlog_buf_arr[i].buf_addr;
11798 			if ((baseaddr < dhd->bus->dongle_ram_base) ||
11799 				((baseaddr + evtlog_buf_arr[i].len) > endaddr)) {
11800 				DHD_ERROR(("%s: Error reading invalid address\n",
11801 					__FUNCTION__));
11802 				goto err;
11803 			}
11804 			/* read each individual event log buf from dongle memory */
11805 			err = dhdpcie_bus_membytes(dhd->bus, FALSE,
11806 					((ulong)evtlog_buf_arr[i].buf_addr),
11807 					dhd->concise_dbg_buf, (evtlog_buf_arr[i].len));
11808 			if (err != BCME_OK) {
11809 				DHD_ERROR(("%s: Error reading event log buffer from dongle !\n",
11810 					__FUNCTION__));
11811 				goto err;
11812 			}
11813 			dhd_dbg_msgtrace_log_parser(dhd, dhd->concise_dbg_buf,
11814 				event_decode_data, (evtlog_buf_arr[i].len),
11815 				FALSE, hton32(seqnum));
11816 			++seqnum;
11817 		}
11818 err:
11819 		MFREE(dhd->osh, evtlog_buf_arr, arr_size);
11820 	} else {
11821 		DHD_ERROR(("%s: Error getting trap log data in ETD !\n", __FUNCTION__));
11822 	}
11823 }
11824 #endif /* EWP_ETD_PRSRV_LOGS */
11825 
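/*
 * Counterpart of dhdpcie_suspend_chipcommon_powerctrl(): if the PowerCtrl
 * mask bits are currently clear, write back the value captured at suspend
 * time.
 */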
11826 static uint32
11827 dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd, uint32 reg_val)
11828 {
11829 	uint addr;
11830 	uint val = 0;
11831 
11832 	DHD_ERROR(("%s\n", __FUNCTION__));
11833 
11834 	/* conditionally clear bits [11:8] of PowerCtrl */
11835 	addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
11836 	dhd_sbreg_op(dhd, addr, &val, TRUE);
11837 	if (!(val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask)) {
11838 		addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
11839 		dhd_sbreg_op(dhd, addr, &reg_val, FALSE);
11840 	}
11841 	return BCME_OK;
11842 }
11843 
11844 static uint32
11845 dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd)
11846 {
11847 	uint addr;
11848 	uint val = 0, reg_val = 0;
11849 
11850 	DHD_ERROR(("%s\n", __FUNCTION__));
11851 
11852 	/* conditionally clear bits [11:8] of PowerCtrl */
11853 	addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
11854 	dhd_sbreg_op(dhd, addr, &reg_val, TRUE);
11855 	if (reg_val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) {
11856 		addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
11857 		val = 0;
11858 		dhd_sbreg_op(dhd, addr, &val, FALSE);
11859 	}
11860 	return reg_val;
11861 }
11862 
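/*
 * Quiesce dongle-side interrupt sources before forcing save/restore:
 * clear the chipcommon and PMU interrupt masks, the PMU resource/MAC
 * request timers, and (where present) the VASIP clock enable.
 */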
11863 static int
11864 dhdpcie_clear_intmask_and_timer(dhd_pub_t *dhd)
11865 {
11866 	uint addr;
11867 	uint val;
11868 
11869 	DHD_ERROR(("%s\n", __FUNCTION__));
11870 
11871 	/* clear chipcommon intmask */
11872 	addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.intmask;
11873 	val = 0x0;
11874 	dhd_sbreg_op(dhd, addr, &val, FALSE);
11875 
11876 	/* clear PMUIntMask0 */
11877 	addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask0;
11878 	val = 0x0;
11879 	dhd_sbreg_op(dhd, addr, &val, FALSE);
11880 
11881 	/* clear PMUIntMask1 */
11882 	addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask1;
11883 	val = 0x0;
11884 	dhd_sbreg_op(dhd, addr, &val, FALSE);
11885 
11886 	/* clear res_req_timer */
11887 	addr = dhd->sssr_reg_info.pmu_regs.base_regs.resreqtimer;
11888 	val = 0x0;
11889 	dhd_sbreg_op(dhd, addr, &val, FALSE);
11890 
11891 	/* clear macresreqtimer */
11892 	addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer;
11893 	val = 0x0;
11894 	dhd_sbreg_op(dhd, addr, &val, FALSE);
11895 
11896 	/* clear macresreqtimer1 */
11897 	addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer1;
11898 	val = 0x0;
11899 	dhd_sbreg_op(dhd, addr, &val, FALSE);
11900 
11901 	/* clear VasipClkEn */
11902 	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
11903 		addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
11904 		val = 0x0;
11905 		dhd_sbreg_op(dhd, addr, &val, FALSE);
11906 	}
11907 
11908 	return BCME_OK;
11909 }
11910 
11911 static void
11912 dhdpcie_update_d11_status_from_trapdata(dhd_pub_t *dhd)
11913 {
11914 #define TRAP_DATA_MAIN_CORE_BIT_MASK	(1 << 1)
11915 #define TRAP_DATA_AUX_CORE_BIT_MASK	(1 << 4)
11916 	uint trap_data_mask[MAX_NUM_D11CORES] =
11917 		{TRAP_DATA_MAIN_CORE_BIT_MASK, TRAP_DATA_AUX_CORE_BIT_MASK};
11918 	int i;
11919 	/* Apply only for 4375 chip */
11920 	if (dhd_bus_chip_id(dhd) == BCM4375_CHIP_ID) {
11921 		for (i = 0; i < MAX_NUM_D11CORES; i++) {
11922 			if (dhd->sssr_d11_outofreset[i] &&
11923 				(dhd->dongle_trap_data & trap_data_mask[i])) {
11924 				dhd->sssr_d11_outofreset[i] = TRUE;
11925 			} else {
11926 				dhd->sssr_d11_outofreset[i] = FALSE;
11927 			}
11928 			DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d after AND with "
11929 				"trap_data:0x%x-0x%x\n",
11930 				__FUNCTION__, i, dhd->sssr_d11_outofreset[i],
11931 				dhd->dongle_trap_data, trap_data_mask[i]));
11932 		}
11933 	}
11934 }
11935 
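/*
 * Record which D11 cores are out of reset (bit 0 of the wrapper resetctrl
 * register cleared) in sssr_d11_outofreset[], then refine the result with
 * the trap data on chips that report per-core trap bits.
 */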
11936 static int
11937 dhdpcie_d11_check_outofreset(dhd_pub_t *dhd)
11938 {
11939 	int i;
11940 	uint addr;
11941 	uint val = 0;
11942 
11943 	DHD_ERROR(("%s\n", __FUNCTION__));
11944 
11945 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
11946 		/* Check if bit 0 of resetctrl is cleared */
11947 		addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
11948 		if (!addr) {
11949 			DHD_ERROR(("%s: skipping for core[%d] as 'addr' is NULL\n",
11950 				__FUNCTION__, i));
11951 			continue;
11952 		}
11953 		dhd_sbreg_op(dhd, addr, &val, TRUE);
11954 		if (!(val & 1)) {
11955 			dhd->sssr_d11_outofreset[i] = TRUE;
11956 		} else {
11957 			dhd->sssr_d11_outofreset[i] = FALSE;
11958 		}
11959 		DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n",
11960 			__FUNCTION__, i, dhd->sssr_d11_outofreset[i]));
11961 	}
11962 	dhdpcie_update_d11_status_from_trapdata(dhd);
11963 
11964 	return BCME_OK;
11965 }
11966 
11967 static int
11968 dhdpcie_d11_clear_clk_req(dhd_pub_t *dhd)
11969 {
11970 	int i;
11971 	uint addr;
11972 	uint val = 0;
11973 
11974 	DHD_ERROR(("%s\n", __FUNCTION__));
11975 
11976 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
11977 		if (dhd->sssr_d11_outofreset[i]) {
11978 			/* clear request clk only if itopoobb is non zero */
11979 			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.itopoobb;
11980 			dhd_sbreg_op(dhd, addr, &val, TRUE);
11981 			if (val != 0) {
11982 				/* clear clockcontrolstatus */
11983 				addr = dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus;
11984 				val =
11985 				dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus_val;
11986 				dhd_sbreg_op(dhd, addr, &val, FALSE);
11987 			}
11988 		}
11989 	}
11990 	return BCME_OK;
11991 }
11992 
11993 static int
11994 dhdpcie_arm_clear_clk_req(dhd_pub_t *dhd)
11995 {
11996 	uint addr;
11997 	uint val = 0;
11998 
11999 	DHD_ERROR(("%s\n", __FUNCTION__));
12000 
12001 	/* Check if bit 0 of resetctrl is cleared */
12002 	addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;
12003 	dhd_sbreg_op(dhd, addr, &val, TRUE);
12004 	if (!(val & 1)) {
12005 		/* clear request clk only if itopoobb is non zero */
12006 		addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.itopoobb;
12007 		dhd_sbreg_op(dhd, addr, &val, TRUE);
12008 		if (val != 0) {
12009 			/* clear clockcontrolstatus */
12010 			addr = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus;
12011 			val = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus_val;
12012 			dhd_sbreg_op(dhd, addr, &val, FALSE);
12013 		}
12014 	}
12015 	return BCME_OK;
12016 }
12017 
12018 static int
12019 dhdpcie_pcie_clear_clk_req(dhd_pub_t *dhd)
12020 {
12021 	uint addr;
12022 	uint val = 0;
12023 
12024 	DHD_ERROR(("%s\n", __FUNCTION__));
12025 
12026 	/* clear request clk only if itopoobb is non zero */
12027 	addr = dhd->sssr_reg_info.pcie_regs.wrapper_regs.itopoobb;
12028 	dhd_sbreg_op(dhd, addr, &val, TRUE);
12029 	if (val) {
12030 		/* clear clockcontrolstatus */
12031 		addr = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus;
12032 		val = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus_val;
12033 		dhd_sbreg_op(dhd, addr, &val, FALSE);
12034 	}
12035 	return BCME_OK;
12036 }
12037 
12038 static int
12039 dhdpcie_pcie_send_ltrsleep(dhd_pub_t *dhd)
12040 {
12041 	uint addr;
12042 	uint val = 0;
12043 
12044 	DHD_ERROR(("%s\n", __FUNCTION__));
12045 
12046 	addr = dhd->sssr_reg_info.pcie_regs.base_regs.ltrstate;
12047 	val = LTR_ACTIVE;
12048 	dhd_sbreg_op(dhd, addr, &val, FALSE);
12049 
12050 	val = LTR_SLEEP;
12051 	dhd_sbreg_op(dhd, addr, &val, FALSE);
12052 
12053 	return BCME_OK;
12054 }
12055 
12056 static int
12057 dhdpcie_clear_clk_req(dhd_pub_t *dhd)
12058 {
12059 	DHD_ERROR(("%s\n", __FUNCTION__));
12060 
12061 	dhdpcie_arm_clear_clk_req(dhd);
12062 
12063 	dhdpcie_d11_clear_clk_req(dhd);
12064 
12065 	dhdpcie_pcie_clear_clk_req(dhd);
12066 
12067 	return BCME_OK;
12068 }
12069 
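/*
 * Re-run the reset sequence for every D11 core previously seen out of
 * reset: disable the core via resetctrl, step the wrapper ioctrl through
 * the ioctrl_resetseq_val[] values, and re-enable the core.
 */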
12070 static int
12071 dhdpcie_bring_d11_outofreset(dhd_pub_t *dhd)
12072 {
12073 	int i;
12074 	uint addr;
12075 	uint val = 0;
12076 
12077 	DHD_ERROR(("%s\n", __FUNCTION__));
12078 
12079 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
12080 		if (dhd->sssr_d11_outofreset[i]) {
12081 			/* disable core by setting bit 0 */
12082 			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
12083 			val = 1;
12084 			dhd_sbreg_op(dhd, addr, &val, FALSE);
12085 			OSL_DELAY(6000);
12086 
12087 			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
12088 			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0];
12089 			dhd_sbreg_op(dhd, addr, &val, FALSE);
12090 
12091 			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1];
12092 			dhd_sbreg_op(dhd, addr, &val, FALSE);
12093 
12094 			/* enable core by clearing bit 0 */
12095 			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
12096 			val = 0;
12097 			dhd_sbreg_op(dhd, addr, &val, FALSE);
12098 
12099 			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
12100 			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2];
12101 			dhd_sbreg_op(dhd, addr, &val, FALSE);
12102 
12103 			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3];
12104 			dhd_sbreg_op(dhd, addr, &val, FALSE);
12105 
12106 			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4];
12107 			dhd_sbreg_op(dhd, addr, &val, FALSE);
12108 		}
12109 	}
12110 	return BCME_OK;
12111 }
12112 
12113 static int
12114 dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd)
12115 {
12116 	int i;
12117 
12118 	DHD_ERROR(("%s\n", __FUNCTION__));
12119 
12120 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
12121 		if (dhd->sssr_d11_outofreset[i]) {
12122 			dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_before[i],
12123 				dhd->sssr_reg_info.mac_regs[i].sr_size,
12124 				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
12125 				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
12126 		}
12127 	}
12128 
12129 	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
12130 		dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
12131 			dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
12132 			dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
12133 	} else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
12134 		dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
12135 		dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_before,
12136 			dhd->sssr_reg_info.dig_mem_info.dig_sr_size,
12137 			dhd->sssr_reg_info.dig_mem_info.dig_sr_addr);
12138 	}
12139 
12140 	return BCME_OK;
12141 }
12142 
12143 static int
12144 dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd)
12145 {
12146 	int i;
12147 
12148 	DHD_ERROR(("%s\n", __FUNCTION__));
12149 
12150 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
12151 		if (dhd->sssr_d11_outofreset[i]) {
12152 			dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_after[i],
12153 				dhd->sssr_reg_info.mac_regs[i].sr_size,
12154 				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
12155 				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
12156 		}
12157 	}
12158 
12159 	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
12160 		dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after,
12161 			dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
12162 			dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
12163 	} else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
12164 		dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
12165 		dhdpcie_get_sssr_dig_dump(dhd, dhd->sssr_dig_buf_after,
12166 			dhd->sssr_reg_info.dig_mem_info.dig_sr_size,
12167 			dhd->sssr_reg_info.dig_mem_info.dig_sr_addr);
12168 	}
12169 
12170 	return BCME_OK;
12171 }
12172 
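/*
 * Top-level SSSR collection: capture the D11/digital state before
 * save/restore, quiesce interrupt masks and timers, drop the PowerCtrl
 * bits, clear outstanding clock requests, request LTR sleep, wait for the
 * retention cycle, then restore everything and capture the state again so
 * the two snapshots can be compared offline.
 */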
12173 int
12174 dhdpcie_sssr_dump(dhd_pub_t *dhd)
12175 {
12176 	uint32 powerctrl_val;
12177 
12178 	if (!dhd->sssr_inited) {
12179 		DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
12180 		return BCME_ERROR;
12181 	}
12182 
12183 	if (dhd->bus->is_linkdown) {
12184 		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
12185 		return BCME_ERROR;
12186 	}
12187 
12188 	dhdpcie_d11_check_outofreset(dhd);
12189 
12190 	DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__));
12191 	if (dhdpcie_sssr_dump_get_before_sr(dhd) != BCME_OK) {
12192 		DHD_ERROR(("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__));
12193 		return BCME_ERROR;
12194 	}
12195 
12196 	dhdpcie_clear_intmask_and_timer(dhd);
12197 	powerctrl_val = dhdpcie_suspend_chipcommon_powerctrl(dhd);
12198 	dhdpcie_clear_clk_req(dhd);
12199 	dhdpcie_pcie_send_ltrsleep(dhd);
12200 
12201 	/* Wait for some time before Restore */
12202 	OSL_DELAY(6000);
12203 
12204 	dhdpcie_resume_chipcommon_powerctrl(dhd, powerctrl_val);
12205 	dhdpcie_bring_d11_outofreset(dhd);
12206 
12207 	DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
12208 	if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
12209 		DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
12210 		return BCME_ERROR;
12211 	}
12212 	dhd->sssr_dump_collected = TRUE;
12213 	dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_SSSR);
12214 
12215 	return BCME_OK;
12216 }
12217 
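/* Kick off a FIS capture by writing DAR_FIS_START_MASK to the DAR FIS
 * control register, then allow ~100ms for the hardware to complete.
 */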
12218 static int
12219 dhdpcie_fis_trigger(dhd_pub_t *dhd)
12220 {
12221 	if (!dhd->sssr_inited) {
12222 		DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
12223 		return BCME_ERROR;
12224 	}
12225 
12226 	if (dhd->bus->is_linkdown) {
12227 		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
12228 		return BCME_ERROR;
12229 	}
12230 
12231 	/* Trigger FIS */
12232 	si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
12233 		DAR_FIS_CTRL(dhd->bus->sih->buscorerev), ~0, DAR_FIS_START_MASK);
12234 	OSL_DELAY(100 * 1000);
12235 
12236 	return BCME_OK;
12237 }
12238 
12239 int
12240 dhd_bus_fis_trigger(dhd_pub_t *dhd)
12241 {
12242 	return dhdpcie_fis_trigger(dhd);
12243 }
12244 
12245 static int
12246 dhdpcie_fis_dump(dhd_pub_t *dhd)
12247 {
12248 	int i;
12249 
12250 	if (!dhd->sssr_inited) {
12251 		DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
12252 		return BCME_ERROR;
12253 	}
12254 
12255 	if (dhd->bus->is_linkdown) {
12256 		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
12257 		return BCME_ERROR;
12258 	}
12259 
12260 	/* bring up all pmu resources */
12261 	PMU_REG(dhd->bus->sih, min_res_mask, ~0,
12262 		PMU_REG(dhd->bus->sih, max_res_mask, 0, 0));
12263 	OSL_DELAY(10 * 1000);
12264 
12265 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
12266 		dhd->sssr_d11_outofreset[i] = TRUE;
12267 	}
12268 
12269 	dhdpcie_bring_d11_outofreset(dhd);
12270 	OSL_DELAY(6000);
12271 
12272 	/* clear FIS Done */
12273 	PMU_REG(dhd->bus->sih, fis_ctrl_status, PMU_CLEAR_FIS_DONE_MASK, PMU_CLEAR_FIS_DONE_MASK);
12274 
12275 	dhdpcie_d11_check_outofreset(dhd);
12276 
12277 	DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
12278 	if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
12279 		DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
12280 		return BCME_ERROR;
12281 	}
12282 
12283 	dhd_write_sssr_dump(dhd, SSSR_DUMP_MODE_FIS);
12284 
12285 	return BCME_OK;
12286 }
12287 
12288 int
12289 dhd_bus_fis_dump(dhd_pub_t *dhd)
12290 {
12291 	return dhdpcie_fis_dump(dhd);
12292 }
12293 #endif /* DHD_SSSR_DUMP */
12294 
12295 #ifdef DHD_WAKE_STATUS
12296 wake_counts_t*
12297 dhd_bus_get_wakecount(dhd_pub_t *dhd)
12298 {
12299 	return &dhd->bus->wake_counts;
12300 }
12301 int
12302 dhd_bus_get_bus_wake(dhd_pub_t *dhd)
12303 {
12304 	return bcmpcie_set_get_wake(dhd->bus, 0);
12305 }
12306 #endif /* DHD_WAKE_STATUS */
12307 
12308 /* Writes random number(s) to the TCM. On initialization, the firmware reads
12309  * this location and uses the random number to randomize its heap address
12310  * space layout. */
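/*
 * Assumed layout at the top of dongle RAM, implied by the address math
 * below: the (compressed) NVRAM image occupies the very top, the
 * bcm_rand_metadata_t header sits directly beneath it, and 'count' bytes
 * of host-supplied entropy are written immediately below the header
 * (addr -= count).
 */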
12311 static int
12312 dhdpcie_wrt_rnd(struct dhd_bus *bus)
12313 {
12314 	bcm_rand_metadata_t rnd_data;
12315 	uint8 rand_buf[BCM_ENTROPY_HOST_NBYTES];
12316 	uint32 count = BCM_ENTROPY_HOST_NBYTES;
12317 	int ret = 0;
12318 	uint32 addr = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM) -
12319 		((bus->nvram_csm & 0xffff)* BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data));
12320 
12321 	memset(rand_buf, 0, BCM_ENTROPY_HOST_NBYTES);
12322 	rnd_data.signature = htol32(BCM_NVRAM_RNG_SIGNATURE);
12323 	rnd_data.count = htol32(count);
12324 	/* write the metadata about random number */
12325 	dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rnd_data, sizeof(rnd_data));
12326 	/* scale back by number of random number counts */
12327 	addr -= count;
12328 
12329 #ifdef DHD_RND_DEBUG
12330 	bus->dhd->rnd_buf = NULL;
12331 	/* get random contents from file */
12332 	ret = dhd_get_rnd_info(bus->dhd);
12333 	if (bus->dhd->rnd_buf) {
12334 		/* write file contents to TCM */
12335 		DHD_ERROR(("%s: use stored .rnd.in content\n", __FUNCTION__));
12336 		dhdpcie_bus_membytes(bus, TRUE, addr, bus->dhd->rnd_buf, bus->dhd->rnd_len);
12337 
12338 		/* Dump random content to out file */
12339 		dhd_dump_rnd_info(bus->dhd, bus->dhd->rnd_buf, bus->dhd->rnd_len);
12340 
12341 		/* bus->dhd->rnd_buf is allocated in dhd_get_rnd_info, free here */
12342 		MFREE(bus->dhd->osh, bus->dhd->rnd_buf, bus->dhd->rnd_len);
12343 		bus->dhd->rnd_buf = NULL;
12344 		return BCME_OK;
12345 	}
12346 #endif /* DHD_RND_DEBUG */
12347 
12348 	/* Now get & write the random number(s) */
12349 	ret = dhd_get_random_bytes(rand_buf, count);
12350 	if (ret != BCME_OK) {
12351 		return ret;
12352 	}
12353 	dhdpcie_bus_membytes(bus, TRUE, addr, rand_buf, count);
12354 
12355 #ifdef DHD_RND_DEBUG
12356 	/* Dump random content to out file */
12357 	dhd_dump_rnd_info(bus->dhd, rand_buf, count);
12358 #endif /* DHD_RND_DEBUG */
12359 
12360 	return BCME_OK;
12361 }
12362 
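/*
 * Dump the interrupt enable/disable counters plus timestamps of recent
 * ISR/DPC milestones and suspend/resume transitions; useful for triaging
 * missed interrupts and stuck suspend/resume sequences.
 */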
12363 void
12364 dhd_pcie_intr_count_dump(dhd_pub_t *dhd)
12365 {
12366 	struct dhd_bus *bus = dhd->bus;
12367 	uint64 current_time;
12368 
12369 	DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters  ------- \r\n"));
12370 	DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n",
12371 		bus->resume_intr_enable_count, bus->dpc_intr_enable_count));
12372 	DHD_ERROR(("isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n",
12373 		bus->isr_intr_disable_count, bus->suspend_intr_disable_count));
12374 #ifdef BCMPCIE_OOB_HOST_WAKE
12375 	DHD_ERROR(("oob_intr_count=%lu oob_intr_enable_count=%lu oob_intr_disable_count=%lu\n",
12376 		bus->oob_intr_count, bus->oob_intr_enable_count,
12377 		bus->oob_intr_disable_count));
12378 	DHD_ERROR(("oob_irq_num=%d last_oob_irq_time="SEC_USEC_FMT"\n",
12379 		dhdpcie_get_oob_irq_num(bus),
12380 		GET_SEC_USEC(bus->last_oob_irq_time)));
12381 	DHD_ERROR(("last_oob_irq_enable_time="SEC_USEC_FMT
12382 		" last_oob_irq_disable_time="SEC_USEC_FMT"\n",
12383 		GET_SEC_USEC(bus->last_oob_irq_enable_time),
12384 		GET_SEC_USEC(bus->last_oob_irq_disable_time)));
12385 	DHD_ERROR(("oob_irq_enabled=%d oob_gpio_level=%d\n",
12386 		dhdpcie_get_oob_irq_status(bus),
12387 		dhdpcie_get_oob_irq_level()));
12388 #endif /* BCMPCIE_OOB_HOST_WAKE */
12389 	DHD_ERROR(("dpc_return_busdown_count=%lu non_ours_irq_count=%lu\n",
12390 		bus->dpc_return_busdown_count, bus->non_ours_irq_count));
12391 
12392 	current_time = OSL_LOCALTIME_NS();
12393 	DHD_ERROR(("\ncurrent_time="SEC_USEC_FMT"\n",
12394 		GET_SEC_USEC(current_time)));
12395 	DHD_ERROR(("isr_entry_time="SEC_USEC_FMT
12396 		" isr_exit_time="SEC_USEC_FMT"\n",
12397 		GET_SEC_USEC(bus->isr_entry_time),
12398 		GET_SEC_USEC(bus->isr_exit_time)));
12399 	DHD_ERROR(("dpc_sched_time="SEC_USEC_FMT
12400 		" last_non_ours_irq_time="SEC_USEC_FMT"\n",
12401 		GET_SEC_USEC(bus->dpc_sched_time),
12402 		GET_SEC_USEC(bus->last_non_ours_irq_time)));
12403 	DHD_ERROR(("dpc_entry_time="SEC_USEC_FMT
12404 		" last_process_ctrlbuf_time="SEC_USEC_FMT"\n",
12405 		GET_SEC_USEC(bus->dpc_entry_time),
12406 		GET_SEC_USEC(bus->last_process_ctrlbuf_time)));
12407 	DHD_ERROR(("last_process_flowring_time="SEC_USEC_FMT
12408 		" last_process_txcpl_time="SEC_USEC_FMT"\n",
12409 		GET_SEC_USEC(bus->last_process_flowring_time),
12410 		GET_SEC_USEC(bus->last_process_txcpl_time)));
12411 	DHD_ERROR(("last_process_rxcpl_time="SEC_USEC_FMT
12412 		" last_process_infocpl_time="SEC_USEC_FMT
12413 		" last_process_edl_time="SEC_USEC_FMT"\n",
12414 		GET_SEC_USEC(bus->last_process_rxcpl_time),
12415 		GET_SEC_USEC(bus->last_process_infocpl_time),
12416 		GET_SEC_USEC(bus->last_process_edl_time)));
12417 	DHD_ERROR(("dpc_exit_time="SEC_USEC_FMT
12418 		" resched_dpc_time="SEC_USEC_FMT"\n",
12419 		GET_SEC_USEC(bus->dpc_exit_time),
12420 		GET_SEC_USEC(bus->resched_dpc_time)));
12421 	DHD_ERROR(("last_d3_inform_time="SEC_USEC_FMT"\n",
12422 		GET_SEC_USEC(bus->last_d3_inform_time)));
12423 
12424 	DHD_ERROR(("\nlast_suspend_start_time="SEC_USEC_FMT
12425 		" last_suspend_end_time="SEC_USEC_FMT"\n",
12426 		GET_SEC_USEC(bus->last_suspend_start_time),
12427 		GET_SEC_USEC(bus->last_suspend_end_time)));
12428 	DHD_ERROR(("last_resume_start_time="SEC_USEC_FMT
12429 		" last_resume_end_time="SEC_USEC_FMT"\n",
12430 		GET_SEC_USEC(bus->last_resume_start_time),
12431 		GET_SEC_USEC(bus->last_resume_end_time)));
12432 
12433 #if defined(SHOW_LOGTRACE) && defined(DHD_USE_KTHREAD_FOR_LOGTRACE)
12434 	DHD_ERROR(("logtrace_thread_entry_time="SEC_USEC_FMT
12435 		" logtrace_thread_sem_down_time="SEC_USEC_FMT
12436 		"\nlogtrace_thread_flush_time="SEC_USEC_FMT
12437 		" logtrace_thread_unexpected_break_time="SEC_USEC_FMT
12438 		"\nlogtrace_thread_complete_time="SEC_USEC_FMT"\n",
12439 		GET_SEC_USEC(dhd->logtrace_thr_ts.entry_time),
12440 		GET_SEC_USEC(dhd->logtrace_thr_ts.sem_down_time),
12441 		GET_SEC_USEC(dhd->logtrace_thr_ts.flush_time),
12442 		GET_SEC_USEC(dhd->logtrace_thr_ts.unexpected_break_time),
12443 		GET_SEC_USEC(dhd->logtrace_thr_ts.complete_time)));
12444 #endif /* SHOW_LOGTRACE && DHD_USE_KTHREAD_FOR_LOGTRACE */
12445 }
12446 
12447 void
12448 dhd_bus_intr_count_dump(dhd_pub_t *dhd)
12449 {
12450 	dhd_pcie_intr_count_dump(dhd);
12451 }
12452 
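/*
 * Walk the AI wrapper registers (OOB select, ITIP OOB, reset status) for
 * the PCIe and ARM cores, dump the ARM CR4/CA7 core registers where those
 * cores exist, then read out the OOB router status registers, restoring
 * the original core index afterwards.
 */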
12453 int
12454 dhd_pcie_dump_wrapper_regs(dhd_pub_t *dhd)
12455 {
12456 	uint32 save_idx, val;
12457 	si_t *sih = dhd->bus->sih;
12458 	uint32 oob_base, oob_base1;
12459 	uint32 wrapper_dump_list[] = {
12460 		AI_OOBSELOUTA30, AI_OOBSELOUTA74, AI_OOBSELOUTB30, AI_OOBSELOUTB74,
12461 		AI_OOBSELOUTC30, AI_OOBSELOUTC74, AI_OOBSELOUTD30, AI_OOBSELOUTD74,
12462 		AI_RESETSTATUS, AI_RESETCTRL,
12463 		AI_ITIPOOBA, AI_ITIPOOBB, AI_ITIPOOBC, AI_ITIPOOBD,
12464 		AI_ITIPOOBAOUT, AI_ITIPOOBBOUT, AI_ITIPOOBCOUT, AI_ITIPOOBDOUT
12465 	};
12466 	uint8 i;
12467 	hndoobr_reg_t *reg;
12468 	cr4regs_t *cr4regs;
12469 	ca7regs_t *ca7regs;
12470 
12471 	save_idx = si_coreidx(sih);
12472 
12473 	DHD_ERROR(("%s: Master wrapper Reg\n", __FUNCTION__));
12474 
12475 	if (si_setcore(sih, PCIE2_CORE_ID, 0) != NULL) {
12476 		for (i = 0; i < sizeof(wrapper_dump_list) / 4; i++) {
12477 			val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
12478 			DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
12479 		}
12480 	}
12481 
12482 	if ((cr4regs = si_setcore(sih, ARMCR4_CORE_ID, 0)) != NULL) {
12483 		DHD_ERROR(("%s: ARM CR4 wrapper Reg\n", __FUNCTION__));
12484 		for (i = 0; i < sizeof(wrapper_dump_list) / 4; i++) {
12485 			val = si_wrapperreg(sih, wrapper_dump_list[i], 0, 0);
12486 			DHD_ERROR(("sbreg: addr:0x%x val:0x%x\n", wrapper_dump_list[i], val));
12487 		}
12488 		DHD_ERROR(("%s: ARM CR4 core Reg\n", __FUNCTION__));
12489 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecontrol));
12490 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corecontrol), val));
12491 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corecapabilities));
12492 		DHD_ERROR(("reg:0x%x val:0x%x\n",
12493 			(uint)OFFSETOF(cr4regs_t, corecapabilities), val));
12494 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, corestatus));
12495 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, corestatus), val));
12496 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmiisrst));
12497 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmiisrst), val));
12498 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, nmimask));
12499 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, nmimask), val));
12500 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, isrmask));
12501 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, isrmask), val));
12502 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, swintreg));
12503 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, swintreg), val));
12504 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, intstatus));
12505 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, intstatus), val));
12506 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, cyclecnt));
12507 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, cyclecnt), val));
12508 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, inttimer));
12509 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, inttimer), val));
12510 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, clk_ctl_st));
12511 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, clk_ctl_st), val));
12512 		val = R_REG(dhd->osh, ARM_CR4_REG(cr4regs, powerctl));
12513 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(cr4regs_t, powerctl), val));
12514 	}
12515 
12516 	if ((ca7regs = si_setcore(sih, ARMCA7_CORE_ID, 0)) != NULL) {
12517 		DHD_ERROR(("%s: ARM CA7 core Reg\n", __FUNCTION__));
12518 		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecontrol));
12519 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corecontrol), val));
12520 		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corecapabilities));
12521 		DHD_ERROR(("reg:0x%x val:0x%x\n",
12522 			(uint)OFFSETOF(ca7regs_t, corecapabilities), val));
12523 		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, corestatus));
12524 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, corestatus), val));
12525 		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, tracecontrol));
12526 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, tracecontrol), val));
12527 		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, clk_ctl_st));
12528 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, clk_ctl_st), val));
12529 		val = R_REG(dhd->osh, ARM_CA7_REG(ca7regs, powerctl));
12530 		DHD_ERROR(("reg:0x%x val:0x%x\n", (uint)OFFSETOF(ca7regs_t, powerctl), val));
12531 	}
12532 
12533 	DHD_ERROR(("%s: OOBR Reg\n", __FUNCTION__));
12534 
12535 	oob_base = si_oobr_baseaddr(sih, FALSE);
12536 	oob_base1 = si_oobr_baseaddr(sih, TRUE);
12537 	if (oob_base) {
12538 		dhd_sbreg_op(dhd, oob_base + OOB_STATUSA, &val, TRUE);
12539 		dhd_sbreg_op(dhd, oob_base + OOB_STATUSB, &val, TRUE);
12540 		dhd_sbreg_op(dhd, oob_base + OOB_STATUSC, &val, TRUE);
12541 		dhd_sbreg_op(dhd, oob_base + OOB_STATUSD, &val, TRUE);
12542 	} else if ((reg = si_setcore(sih, HND_OOBR_CORE_ID, 0)) != NULL) {
12543 		val = R_REG(dhd->osh, &reg->intstatus[0]);
12544 		DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
12545 		val = R_REG(dhd->osh, &reg->intstatus[1]);
12546 		DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
12547 		val = R_REG(dhd->osh, &reg->intstatus[2]);
12548 		DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
12549 		val = R_REG(dhd->osh, &reg->intstatus[3]);
12550 		DHD_ERROR(("reg: addr:%p val:0x%x\n", reg, val));
12551 	}
12552 
12553 	if (oob_base1) {
12554 		DHD_ERROR(("%s: Second OOBR Reg\n", __FUNCTION__));
12555 
12556 		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSA, &val, TRUE);
12557 		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSB, &val, TRUE);
12558 		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSC, &val, TRUE);
12559 		dhd_sbreg_op(dhd, oob_base1 + OOB_STATUSD, &val, TRUE);
12560 	}
12561 
12562 	si_setcoreidx(dhd->bus->sih, save_idx);
12563 
12564 	return 0;
12565 }
12566 
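/*
 * Raw dump of the HostToDev/DevToHost DMA engine registers at fixed
 * offsets (0x200-0x274) in the PCIe core; skipped entirely when the link
 * is already down since the reads could fault.
 */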
12567 int
12568 dhd_pcie_dma_info_dump(dhd_pub_t *dhd)
12569 {
12570 	if (dhd->bus->is_linkdown) {
12571 		DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers "
12572 			"due to PCIe link down ------- \r\n"));
12573 		return 0;
12574 	}
12575 
12576 	DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n"));
12577 
12578 	//HostToDev
12579 	DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
12580 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0),
12581 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0)));
12582 	DHD_ERROR(("            : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
12583 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0),
12584 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0)));
12585 	DHD_ERROR(("            : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
12586 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0),
12587 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0)));
12588 
12589 	DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
12590 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0),
12591 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0)));
12592 	DHD_ERROR(("            : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
12593 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0),
12594 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0)));
12595 	DHD_ERROR(("            : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
12596 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0),
12597 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0)));
12598 
12599 	//DevToHost
12600 	DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
12601 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0),
12602 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0)));
12603 	DHD_ERROR(("            : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
12604 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0),
12605 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0)));
12606 	DHD_ERROR(("            : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
12607 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0),
12608 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0)));
12609 
12610 	DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
12611 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0),
12612 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0)));
12613 	DHD_ERROR(("            : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
12614 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0),
12615 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0)));
12616 	DHD_ERROR(("            : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
12617 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0),
12618 		si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0)));
12619 
12620 	return 0;
12621 }
12622 
12623 bool
12624 dhd_pcie_dump_int_regs(dhd_pub_t *dhd)
12625 {
12626 	uint32 intstatus = 0;
12627 	uint32 intmask = 0;
12628 	uint32 d2h_db0 = 0;
12629 	uint32 d2h_mb_data = 0;
12630 
12631 	DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
12632 	intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
12633 		dhd->bus->pcie_mailbox_int, 0, 0);
12634 	if (intstatus == (uint32)-1) {
12635 		DHD_ERROR(("intstatus=0x%x \n", intstatus));
12636 		return FALSE;
12637 	}
12638 
12639 	intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
12640 		dhd->bus->pcie_mailbox_mask, 0, 0);
12641 	if (intmask == (uint32) -1) {
12642 		DHD_ERROR(("intstatus=0x%x intmask=0x%x \n", intstatus, intmask));
12643 		return FALSE;
12644 	}
12645 
12646 	d2h_db0 = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
12647 		PCID2H_MailBox, 0, 0);
12648 	if (d2h_db0 == (uint32)-1) {
12649 		DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
12650 		intstatus, intmask, d2h_db0));
12651 		return FALSE;
12652 	}
12653 
12654 	DHD_ERROR(("intstatus=0x%x intmask=0x%x d2h_db0=0x%x\n",
12655 		intstatus, intmask, d2h_db0));
12656 	dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
12657 	DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
12658 		dhd->bus->def_intmask));
12659 
12660 	return TRUE;
12661 }
12662 
12663 void
12664 dhd_pcie_dump_rc_conf_space_cap(dhd_pub_t *dhd)
12665 {
12666 	DHD_ERROR(("\n ------- DUMPING PCIE RC config space Registers ------- \r\n"));
12667 	DHD_ERROR(("Pcie RC Uncorrectable Error Status Val=0x%x\n",
12668 		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
12669 		PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
12670 #ifdef EXTENDED_PCIE_DEBUG_DUMP
12671 	DHD_ERROR(("hdrlog0 =0x%08x hdrlog1 =0x%08x hdrlog2 =0x%08x hdrlog3 =0x%08x\n",
12672 		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
12673 		PCIE_EXTCAP_ERR_HEADER_LOG_0, TRUE, FALSE, 0),
12674 		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
12675 		PCIE_EXTCAP_ERR_HEADER_LOG_1, TRUE, FALSE, 0),
12676 		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
12677 		PCIE_EXTCAP_ERR_HEADER_LOG_2, TRUE, FALSE, 0),
12678 		dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
12679 		PCIE_EXTCAP_ERR_HEADER_LOG_3, TRUE, FALSE, 0)));
12680 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
12681 }
12682 
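/*
 * One-stop PCIe debug dump: low-power/IRQ state, interrupt counters, EP
 * resources, RC and EP config space, and (link permitting) the PCIe core
 * and DMA registers.
 */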
12683 int
12684 dhd_pcie_debug_info_dump(dhd_pub_t *dhd)
12685 {
12686 	int host_irq_disabled;
12687 
12688 	DHD_ERROR(("bus->bus_low_power_state = %d\n", dhd->bus->bus_low_power_state));
12689 	host_irq_disabled = dhdpcie_irq_disabled(dhd->bus);
12690 	DHD_ERROR(("host pcie_irq disabled = %d\n", host_irq_disabled));
12691 	dhd_print_tasklet_status(dhd);
12692 	dhd_pcie_intr_count_dump(dhd);
12693 
12694 	DHD_ERROR(("\n ------- DUMPING PCIE EP Resource Info ------- \r\n"));
12695 	dhdpcie_dump_resource(dhd->bus);
12696 
12697 	dhd_pcie_dump_rc_conf_space_cap(dhd);
12698 
12699 	DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
12700 		dhd_debug_get_rc_linkcap(dhd->bus)));
12701 
12702 	if (dhd->bus->is_linkdown && !dhd->bus->cto_triggered) {
12703 		DHD_ERROR(("Skip dumping the PCIe Config and Core registers. "
12704 			"link may be DOWN\n"));
12705 		return 0;
12706 	}
12707 
12708 	DHD_ERROR(("\n ------- DUMPING PCIE EP config space Registers ------- \r\n"));
12709 	DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x BaseAddress1(0x%x)=0x%x "
12710 		"PCIE_CFG_PMCSR(0x%x)=0x%x\n",
12711 		PCIECFGREG_STATUS_CMD,
12712 		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD, sizeof(uint32)),
12713 		PCIECFGREG_BASEADDR0,
12714 		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)),
12715 		PCIECFGREG_BASEADDR1,
12716 		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR1, sizeof(uint32)),
12717 		PCIE_CFG_PMCSR,
12718 		dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_PMCSR, sizeof(uint32))));
12719 	DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x "
12720 		"L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL,
12721 		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL,
12722 		sizeof(uint32)), PCIECFGGEN_DEV_STATUS_CTRL2,
12723 		dhd_pcie_config_read(dhd->bus->osh, PCIECFGGEN_DEV_STATUS_CTRL2,
12724 		sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL1,
12725 		dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1,
12726 		sizeof(uint32))));
12727 #ifdef EXTENDED_PCIE_DEBUG_DUMP
12728 	DHD_ERROR(("Pcie EP Uncorrectable Error Status Val=0x%x\n",
12729 		dhdpcie_ep_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
12730 		PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
12731 	DHD_ERROR(("hdrlog0(0x%x)=0x%08x hdrlog1(0x%x)=0x%08x hdrlog2(0x%x)=0x%08x "
12732 		"hdrlog3(0x%x)=0x%08x\n", PCI_TLP_HDR_LOG1,
12733 		dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG1, sizeof(uint32)),
12734 		PCI_TLP_HDR_LOG2,
12735 		dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG2, sizeof(uint32)),
12736 		PCI_TLP_HDR_LOG3,
12737 		dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG3, sizeof(uint32)),
12738 		PCI_TLP_HDR_LOG4,
12739 		dhd_pcie_config_read(dhd->bus->osh, PCI_TLP_HDR_LOG4, sizeof(uint32))));
12740 	if (dhd->bus->sih->buscorerev >= 24) {
12741 		DHD_ERROR(("DeviceStatusControl(0x%x)=0x%x SubsystemControl(0x%x)=0x%x "
12742 			"L1SSControl2(0x%x)=0x%x\n", PCIECFGREG_DEV_STATUS_CTRL,
12743 			dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_DEV_STATUS_CTRL,
12744 			sizeof(uint32)), PCIE_CFG_SUBSYSTEM_CONTROL,
12745 			dhd_pcie_config_read(dhd->bus->osh, PCIE_CFG_SUBSYSTEM_CONTROL,
12746 			sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL2,
12747 			dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL2,
12748 			sizeof(uint32))));
12749 		dhd_bus_dump_dar_registers(dhd->bus);
12750 	}
12751 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
12752 
12753 	if (dhd->bus->is_linkdown) {
12754 		DHD_ERROR(("Skip dumping the PCIe Core registers. link may be DOWN\n"));
12755 		return 0;
12756 	}
12757 
12758 	DHD_ERROR(("\n ------- DUMPING PCIE core Registers ------- \r\n"));
12759 
12760 	DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
12761 		"ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0,
12762 		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0),
12763 		PCIECFGREG_PHY_DBG_CLKREQ1,
12764 		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1),
12765 		PCIECFGREG_PHY_DBG_CLKREQ2,
12766 		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2),
12767 		PCIECFGREG_PHY_DBG_CLKREQ3,
12768 		dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3)));
12769 
12770 #ifdef EXTENDED_PCIE_DEBUG_DUMP
12771 	if (dhd->bus->sih->buscorerev >= 24) {
12772 
12773 		DHD_ERROR(("ltssm_hist_0(0x%x)=0x%x ltssm_hist_1(0x%x)=0x%x "
12774 			"ltssm_hist_2(0x%x)=0x%x "
12775 			"ltssm_hist_3(0x%x)=0x%x\n", PCIECFGREG_PHY_LTSSM_HIST_0,
12776 			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_0),
12777 			PCIECFGREG_PHY_LTSSM_HIST_1,
12778 			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_1),
12779 			PCIECFGREG_PHY_LTSSM_HIST_2,
12780 			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_2),
12781 			PCIECFGREG_PHY_LTSSM_HIST_3,
12782 			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_LTSSM_HIST_3)));
12783 
12784 		DHD_ERROR(("trefup(0x%x)=0x%x trefup_ext(0x%x)=0x%x\n",
12785 			PCIECFGREG_TREFUP,
12786 			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP),
12787 			PCIECFGREG_TREFUP_EXT,
12788 			dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_TREFUP_EXT)));
12789 		DHD_ERROR(("errlog(0x%x)=0x%x errlog_addr(0x%x)=0x%x "
12790 			"Function_Intstatus(0x%x)=0x%x "
12791 			"Function_Intmask(0x%x)=0x%x Power_Intstatus(0x%x)=0x%x "
12792 			"Power_Intmask(0x%x)=0x%x\n",
12793 			PCIE_CORE_REG_ERRLOG,
12794 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
12795 				PCIE_CORE_REG_ERRLOG, 0, 0),
12796 			PCIE_CORE_REG_ERR_ADDR,
12797 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
12798 				PCIE_CORE_REG_ERR_ADDR, 0, 0),
12799 			PCIFunctionIntstatus(dhd->bus->sih->buscorerev),
12800 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
12801 				PCIFunctionIntstatus(dhd->bus->sih->buscorerev), 0, 0),
12802 			PCIFunctionIntmask(dhd->bus->sih->buscorerev),
12803 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
12804 				PCIFunctionIntmask(dhd->bus->sih->buscorerev), 0, 0),
12805 			PCIPowerIntstatus(dhd->bus->sih->buscorerev),
12806 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
12807 				PCIPowerIntstatus(dhd->bus->sih->buscorerev), 0, 0),
12808 			PCIPowerIntmask(dhd->bus->sih->buscorerev),
12809 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
12810 				PCIPowerIntmask(dhd->bus->sih->buscorerev), 0, 0)));
12811 		DHD_ERROR(("err_hdrlog1(0x%x)=0x%x err_hdrlog2(0x%x)=0x%x "
12812 			"err_hdrlog3(0x%x)=0x%x err_hdrlog4(0x%x)=0x%x\n",
12813 			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1),
12814 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
12815 				OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg1), 0, 0),
12816 			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2),
12817 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
12818 				OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg2), 0, 0),
12819 			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3),
12820 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
12821 				OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg3), 0, 0),
12822 			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4),
12823 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
12824 				OFFSETOF(sbpcieregs_t, u.pcie2.err_hdr_logreg4), 0, 0)));
12825 		DHD_ERROR(("err_code(0x%x)=0x%x\n",
12826 			(uint)OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg),
12827 			si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
12828 				OFFSETOF(sbpcieregs_t, u.pcie2.err_code_logreg), 0, 0)));
12829 
12830 		dhd_pcie_dump_wrapper_regs(dhd);
12831 	}
12832 #endif /* EXTENDED_PCIE_DEBUG_DUMP */
12833 
12834 	dhd_pcie_dma_info_dump(dhd);
12835 
12836 	return 0;
12837 }
12838 
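/* Simple accessor: report whether forced BT quiesce is enabled on this bus. */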
12839 bool
12840 dhd_bus_force_bt_quiesce_enabled(struct dhd_bus *bus)
12841 {
12842 	return bus->force_bt_quiesce;
12843 }
12844 
12845 #ifdef DHD_HP2P
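/* Return the configured maximum item count for the HP2P TX or RX completion
 * ring, selected by the 'tx' flag.
 */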
12846 uint16
12847 dhd_bus_get_hp2p_ring_max_size(struct dhd_bus *bus, bool tx)
12848 {
12849 	if (tx)
12850 		return bus->hp2p_txcpl_max_items;
12851 	else
12852 		return bus->hp2p_rxcpl_max_items;
12853 }
12854 
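/* Set the HP2P TX or RX completion ring maximum and echo the value back.
 * Note that no clamping is done here; any bounds check (e.g. against
 * DHD_MAX_ITEMS_HPP_TXCPL_RING) is left to the caller.
 */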
12855 static uint16
12856 dhd_bus_set_hp2p_ring_max_size(struct dhd_bus *bus, bool tx, uint16 val)
12857 {
12858 	if (tx)
12859 		bus->hp2p_txcpl_max_items = val;
12860 	else
12861 		bus->hp2p_rxcpl_max_items = val;
12862 	return val;
12863 }
12864 #endif /* DHD_HP2P */
12865 
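/*
 * Sanity-test the dongle TCM (on-chip RAM): for each pattern in init_val,
 * fill the whole RAM in MEMBLOCK-sized chunks via dhdpcie_bus_membytes(),
 * read each chunk back and compare.  Returns TRUE only if every chunk
 * matches for every pattern; on any failure the scratch buffers are freed
 * and FALSE is returned.
 */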
12866 static bool
12867 dhd_bus_tcm_test(struct dhd_bus *bus)
12868 {
12869 	int ret = 0;
12870 	int size; /* Full mem size */
12871 	int start; /* Start address */
12872 	int read_size = 0; /* Read size of each iteration */
12873 	int num = 0;
12874 	uint8 *read_buf, *write_buf;
12875 	uint8 init_val[NUM_PATTERNS] = {
12876 		0xFFu, /* 11111111 */
12877 		0x00u, /* 00000000 */
12878 	};
12879 
12880 	if (!bus) {
12881 		DHD_ERROR(("%s: bus is NULL !\n", __FUNCTION__));
12882 		return FALSE;
12883 	}
12884 
12885 	read_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
12886 
12887 	if (!read_buf) {
12888 		DHD_ERROR(("%s: MALLOC of read_buf failed\n", __FUNCTION__));
12889 		return FALSE;
12890 	}
12891 
12892 	write_buf = MALLOCZ(bus->dhd->osh, MEMBLOCK);
12893 
12894 	if (!write_buf) {
12895 		MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
12896 		DHD_ERROR(("%s: MALLOC of write_buf failed\n", __FUNCTION__));
12897 		return FALSE;
12898 	}
12899 
12900 	DHD_ERROR(("%s: start %x,  size: %x\n", __FUNCTION__, bus->dongle_ram_base, bus->ramsize));
12901 	DHD_ERROR(("%s: memblock size %d,  #pattern %d\n", __FUNCTION__, MEMBLOCK, NUM_PATTERNS));
12902 
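	/* Outer loop: one pass per test pattern; inner loop: walk the full RAM
	 * range in MEMBLOCK-sized chunks.
	 */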
12903 	while (num < NUM_PATTERNS) {
12904 		start = bus->dongle_ram_base;
12905 		/* Get full mem size */
12906 		size = bus->ramsize;
12907 
12908 		memset(write_buf, init_val[num], MEMBLOCK);
12909 		while (size > 0) {
12910 			read_size = MIN(MEMBLOCK, size);
12911 			memset(read_buf, 0, read_size);
12912 
12913 			/* Write */
12914 			if ((ret = dhdpcie_bus_membytes(bus, TRUE, start, write_buf, read_size))) {
12915 				DHD_ERROR(("%s: Write Error membytes %d\n", __FUNCTION__, ret));
12916 				MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
12917 				MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
12918 				return FALSE;
12919 			}
12920 
12921 			/* Read */
12922 			if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, read_buf, read_size))) {
12923 				DHD_ERROR(("%s: Read Error membytes %d\n", __FUNCTION__, ret));
12924 				MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
12925 				MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
12926 				return FALSE;
12927 			}
12928 
12929 			/* Compare */
12930 			if (memcmp(read_buf, write_buf, read_size)) {
12931 				DHD_ERROR(("%s: Mismatch at %x, iter : %d\n",
12932 					__FUNCTION__, start, num));
12933 				prhex("Readbuf", read_buf, read_size);
12934 				prhex("Writebuf", write_buf, read_size);
12935 				MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
12936 				MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
12937 				return FALSE;
12938 			}
12939 
12940 			/* Decrement size and increment start address */
12941 			size -= read_size;
12942 			start += read_size;
12943 		}
12944 		num++;
12945 	}
12946 
12947 	MFREE(bus->dhd->osh, read_buf, MEMBLOCK);
12948 	MFREE(bus->dhd->osh, write_buf, MEMBLOCK);
12949 
12950 	DHD_ERROR(("%s: Success iter : %d\n", __FUNCTION__, num));
12951 	return TRUE;
12952 }
12953