xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd_indep_power/dhd_pcie.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * DHD Bus Module for PCIE
4  *
5  * Copyright (C) 1999-2017, Broadcom Corporation
6  *
7  *      Unless you and Broadcom execute a separate written software license
8  * agreement governing use of this software, this software is licensed to you
9  * under the terms of the GNU General Public License version 2 (the "GPL"),
10  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11  * following added to such license:
12  *
13  *      As a special exception, the copyright holders of this software give you
14  * permission to link this software with independent modules, and to copy and
15  * distribute the resulting executable under terms of your choice, provided that
16  * you also meet, for each linked independent module, the terms and conditions of
17  * the license of that module.  An independent module is a module which is not
18  * derived from this software.  The special exception does not apply to any
19  * modifications of the software.
20  *
21  *      Notwithstanding the above, under no circumstances may you combine this
22  * software in any way with any other Broadcom software provided under a license
23  * other than the GPL, without Broadcom's express prior written consent.
24  *
25  *
26  * <<Broadcom-WL-IPTag/Open:>>
27  *
28  * $Id: dhd_pcie.c 710862 2017-07-14 07:43:59Z $
29  */
30 
31 
32 /* include files */
33 #include <typedefs.h>
34 #include <bcmutils.h>
35 #include <bcmdevs.h>
36 #include <siutils.h>
37 #include <hndsoc.h>
38 #include <hndpmu.h>
39 #include <hnd_debug.h>
40 #include <sbchipc.h>
41 #include <hnd_armtrap.h>
42 #if defined(DHD_DEBUG)
43 #include <hnd_cons.h>
44 #endif /* defined(DHD_DEBUG) */
45 #include <dngl_stats.h>
46 #include <pcie_core.h>
47 #include <dhd.h>
48 #include <dhd_bus.h>
49 #include <dhd_flowring.h>
50 #include <dhd_proto.h>
51 #include <dhd_dbg.h>
52 #include <dhd_daemon.h>
53 #include <dhdioctl.h>
54 #include <sdiovar.h>
55 #include <bcmmsgbuf.h>
56 #include <pcicfg.h>
57 #include <dhd_pcie.h>
58 #include <bcmpcie.h>
59 #include <bcmendian.h>
60 #ifdef DHDTCPACK_SUPPRESS
61 #include <dhd_ip.h>
62 #endif /* DHDTCPACK_SUPPRESS */
63 #include <bcmevent.h>
64 #include <dhd_config.h>
65 
66 #ifdef DHD_TIMESYNC
67 #include <dhd_timesync.h>
68 #endif /* DHD_TIMESYNC */
69 
70 #if defined(BCMEMBEDIMAGE)
71 #ifndef DHD_EFI
72 #include BCMEMBEDIMAGE
73 #else
74 #include <rtecdc_4364.h>
75 #endif /* !DHD_EFI */
76 #endif /* BCMEMBEDIMAGE */
77 
78 #define MEMBLOCK	2048		/* Block size used for downloading of dongle image */
79 #define MAX_WKLK_IDLE_CHECK	3	/* times wake_lock checked before deciding not to suspend */
80 
81 #define ARMCR4REG_BANKIDX	(0x40/sizeof(uint32))
82 #define ARMCR4REG_BANKPDA	(0x4C/sizeof(uint32))
83 /* Temporary war to fix precommit till sync issue between trunk & precommit branch is resolved */
84 
85 /* CTO Prevention Recovery */
86 #define CTO_TO_CLEAR_WAIT_MS 1000
87 #define CTO_TO_CLEAR_WAIT_MAX_CNT 10
88 
89 #if defined(SUPPORT_MULTIPLE_BOARD_REV)
90 	extern unsigned int system_rev;
91 #endif /* SUPPORT_MULTIPLE_BOARD_REV */
92 
93 int dhd_dongle_memsize;
94 int dhd_dongle_ramsize;
95 static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
96 static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
97 #if defined(DHD_FW_COREDUMP)
98 struct dhd_bus *g_dhd_bus = NULL;
99 static int dhdpcie_mem_dump(dhd_bus_t *bus);
100 #endif /* DHD_FW_COREDUMP */
101 
102 static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
103 static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid,
104 	const char *name, void *params,
105 	int plen, void *arg, int len, int val_size);
106 static int dhdpcie_bus_lpback_req(struct  dhd_bus *bus, uint32 intval);
107 static int dhdpcie_bus_dmaxfer_req(struct  dhd_bus *bus,
108 	uint32 len, uint32 srcdelay, uint32 destdelay, uint32 d11_lpbk);
109 static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
110 static int _dhdpcie_download_firmware(struct dhd_bus *bus);
111 static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
112 static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
113 static bool dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
114 static bool dhdpci_bus_read_frames(dhd_bus_t *bus);
115 static int dhdpcie_readshared(dhd_bus_t *bus);
116 static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
117 static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
118 static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size);
119 static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
120 	bool dongle_isolation, bool reset_flag);
121 static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
122 static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
123 static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
124 static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
125 static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
126 static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset);
127 static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
128 static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
129 #ifdef DHD_SUPPORT_64BIT
130 static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
131 static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset);
132 #endif /* DHD_SUPPORT_64BIT */
133 static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
134 static void dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size);
135 static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
136 static void dhdpcie_fw_trap(dhd_bus_t *bus);
137 static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
138 extern void dhd_dpc_enable(dhd_pub_t *dhdp);
139 extern void dhd_dpc_kill(dhd_pub_t *dhdp);
140 
141 #ifdef IDLE_TX_FLOW_MGMT
142 static void dhd_bus_check_idle_scan(dhd_bus_t *bus);
143 static void dhd_bus_idle_scan(dhd_bus_t *bus);
144 #endif /* IDLE_TX_FLOW_MGMT */
145 
146 #ifdef BCMEMBEDIMAGE
147 static int dhdpcie_download_code_array(dhd_bus_t *bus);
148 #endif /* BCMEMBEDIMAGE */
149 
150 
151 #ifdef EXYNOS_PCIE_DEBUG
152 extern void exynos_pcie_register_dump(int ch_num);
153 #endif /* EXYNOS_PCIE_DEBUG */
154 
155 #define     PCI_VENDOR_ID_BROADCOM          0x14e4
156 
157 #define DHD_DEFAULT_DOORBELL_TIMEOUT 200	/* ms */
158 #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
159 static uint dhd_doorbell_timeout = DHD_DEFAULT_DOORBELL_TIMEOUT;
160 #endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
161 static bool dhdpcie_check_firmware_compatible(uint32 f_api_version, uint32 h_api_version);
162 static void dhdpcie_cto_error_recovery(struct dhd_bus *bus);
163 
164 #ifdef BCM_ASLR_HEAP
165 static void dhdpcie_wrt_rnd(struct dhd_bus *bus);
166 #endif /* BCM_ASLR_HEAP */
167 
168 extern uint16 dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd);
169 extern void dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost);
170 
171 /* IOVar table */
172 enum {
173 	IOV_INTR = 1,
174 	IOV_MEMSIZE,
175 	IOV_SET_DOWNLOAD_STATE,
176 	IOV_DEVRESET,
177 	IOV_VARS,
178 	IOV_MSI_SIM,
179 	IOV_PCIE_LPBK,
180 	IOV_CC_NVMSHADOW,
181 	IOV_RAMSIZE,
182 	IOV_RAMSTART,
183 	IOV_SLEEP_ALLOWED,
184 	IOV_PCIE_DMAXFER,
185 	IOV_PCIE_SUSPEND,
186 	IOV_DONGLEISOLATION,
187 	IOV_LTRSLEEPON_UNLOOAD,
188 	IOV_METADATA_DBG,
189 	IOV_RX_METADATALEN,
190 	IOV_TX_METADATALEN,
191 	IOV_TXP_THRESHOLD,
192 	IOV_BUZZZ_DUMP,
193 	IOV_DUMP_RINGUPD_BLOCK,
194 	IOV_DMA_RINGINDICES,
195 	IOV_FORCE_FW_TRAP,
196 	IOV_DB1_FOR_MB,
197 	IOV_FLOW_PRIO_MAP,
198 #ifdef DHD_PCIE_RUNTIMEPM
199 	IOV_IDLETIME,
200 #endif /* DHD_PCIE_RUNTIMEPM */
201 	IOV_RXBOUND,
202 	IOV_TXBOUND,
203 	IOV_HANGREPORT,
204 	IOV_H2D_MAILBOXDATA,
205 	IOV_INFORINGS,
206 	IOV_H2D_PHASE,
207 	IOV_H2D_ENABLE_TRAP_BADPHASE,
208 	IOV_H2D_TXPOST_MAX_ITEM,
209 	IOV_TRAPDATA,
210 	IOV_TRAPDATA_RAW,
211 	IOV_CTO_PREVENTION,
212 #ifdef PCIE_OOB
213 	IOV_OOB_BT_REG_ON,
214 	IOV_OOB_ENABLE,
215 #endif /* PCIE_OOB */
216 	IOV_PCIE_WD_RESET,
217 	IOV_CTO_THRESHOLD,
218 #ifdef DHD_EFI
219 	IOV_CONTROL_SIGNAL,
220 #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
221 	IOV_DEEP_SLEEP,
222 #endif /* PCIE_OOB || PCIE_INB_DW */
223 #endif /* DHD_EFI */
224 #ifdef DEVICE_TX_STUCK_DETECT
225 	IOV_DEVICE_TX_STUCK_DETECT,
226 #endif /* DEVICE_TX_STUCK_DETECT */
227 	IOV_INB_DW_ENABLE,
228 	IOV_IDMA_ENABLE,
229 	IOV_IFRM_ENABLE,
230 	IOV_CLEAR_RING,
231 #ifdef DHD_EFI
232 	IOV_WIFI_PROPERTIES,
233 	IOV_OTP_DUMP
234 #endif
235 };
236 
237 
238 const bcm_iovar_t dhdpcie_iovars[] = {
239 	{"intr",	IOV_INTR,	0,	0, IOVT_BOOL,	0 },
240 	{"memsize",	IOV_MEMSIZE,	0,	0, IOVT_UINT32,	0 },
241 	{"dwnldstate",	IOV_SET_DOWNLOAD_STATE,	0,	0, IOVT_BOOL,	0 },
242 	{"vars",	IOV_VARS,	0,	0, IOVT_BUFFER,	0 },
243 	{"devreset",	IOV_DEVRESET,	0,	0, IOVT_BOOL,	0 },
244 	{"pcie_device_trap", IOV_FORCE_FW_TRAP, 0,	0, 0,	0 },
245 	{"pcie_lpbk",	IOV_PCIE_LPBK,	0,	0, IOVT_UINT32,	0 },
246 	{"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, 0, IOVT_BUFFER, 0 },
247 	{"ramsize",	IOV_RAMSIZE,	0,	0, IOVT_UINT32,	0 },
248 	{"ramstart",	IOV_RAMSTART,	0,	0, IOVT_UINT32,	0 },
249 	{"pcie_dmaxfer",	IOV_PCIE_DMAXFER,	0,	0, IOVT_BUFFER,	3 * sizeof(int32) },
250 	{"pcie_suspend", IOV_PCIE_SUSPEND,	0,	0, IOVT_UINT32,	0 },
251 #ifdef PCIE_OOB
252 	{"oob_bt_reg_on", IOV_OOB_BT_REG_ON,    0, 0,  IOVT_UINT32,    0 },
253 	{"oob_enable",   IOV_OOB_ENABLE,    0, 0,  IOVT_UINT32,    0 },
254 #endif /* PCIE_OOB */
255 	{"sleep_allowed",	IOV_SLEEP_ALLOWED,	0,	0, IOVT_BOOL,	0 },
256 	{"dngl_isolation", IOV_DONGLEISOLATION,	0,	0, IOVT_UINT32,	0 },
257 	{"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD,	0,	0, IOVT_UINT32,	0 },
258 	{"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK,	0,	0, IOVT_BUFFER,	0 },
259 	{"dma_ring_indices", IOV_DMA_RINGINDICES,	0,	0, IOVT_UINT32,	0},
260 	{"metadata_dbg", IOV_METADATA_DBG,	0,	0, IOVT_BOOL,	0 },
261 	{"rx_metadata_len", IOV_RX_METADATALEN,	0,	0, IOVT_UINT32,	0 },
262 	{"tx_metadata_len", IOV_TX_METADATALEN,	0,	0, IOVT_UINT32,	0 },
263 	{"db1_for_mb", IOV_DB1_FOR_MB,	0,	0, IOVT_UINT32,	0 },
264 	{"txp_thresh", IOV_TXP_THRESHOLD,	0,	0, IOVT_UINT32,	0 },
265 	{"buzzz_dump", IOV_BUZZZ_DUMP,		0,	0, IOVT_UINT32,	0 },
266 	{"flow_prio_map", IOV_FLOW_PRIO_MAP,	0,	0, IOVT_UINT32,	0 },
267 #ifdef DHD_PCIE_RUNTIMEPM
268 	{"idletime",    IOV_IDLETIME,   0, 0,      IOVT_INT32,     0 },
269 #endif /* DHD_PCIE_RUNTIMEPM */
270 	{"rxbound",     IOV_RXBOUND,    0, 0,      IOVT_UINT32,    0 },
271 	{"txbound",     IOV_TXBOUND,    0, 0,      IOVT_UINT32,    0 },
272 	{"fw_hang_report", IOV_HANGREPORT,	0,	0, IOVT_BOOL,	0 },
273 	{"h2d_mb_data",     IOV_H2D_MAILBOXDATA,    0, 0,      IOVT_UINT32,    0 },
274 	{"inforings",   IOV_INFORINGS,    0, 0,      IOVT_UINT32,    0 },
275 	{"h2d_phase",   IOV_H2D_PHASE,    0, 0,      IOVT_UINT32,    0 },
276 	{"force_trap_bad_h2d_phase", IOV_H2D_ENABLE_TRAP_BADPHASE,    0, 0,
277 	IOVT_UINT32,    0 },
278 	{"h2d_max_txpost",   IOV_H2D_TXPOST_MAX_ITEM,    0, 0,      IOVT_UINT32,    0 },
279 	{"trap_data",	IOV_TRAPDATA,	0,	0,	IOVT_BUFFER,	0 },
280 	{"trap_data_raw",	IOV_TRAPDATA_RAW,	0, 0,	IOVT_BUFFER,	0 },
281 	{"cto_prevention",	IOV_CTO_PREVENTION,	0,	0, IOVT_UINT32,	0 },
282 	{"pcie_wd_reset",	IOV_PCIE_WD_RESET,	0,	0, IOVT_BOOL,	0 },
283 	{"cto_threshold",	IOV_CTO_THRESHOLD,	0,	0, IOVT_UINT32,	0 },
284 #ifdef DHD_EFI
285 	{"control_signal", IOV_CONTROL_SIGNAL,	0, 0, IOVT_UINT32, 0},
286 #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
287 	{"deep_sleep", IOV_DEEP_SLEEP, 0, 0, IOVT_UINT32,    0},
288 #endif /* PCIE_OOB || PCIE_INB_DW */
289 #endif /* DHD_EFI */
290 	{"inb_dw_enable",   IOV_INB_DW_ENABLE,    0, 0,  IOVT_UINT32,    0 },
291 #ifdef DEVICE_TX_STUCK_DETECT
292 	{"dev_tx_stuck_monitor", IOV_DEVICE_TX_STUCK_DETECT, 0, 0, IOVT_UINT32, 0 },
293 #endif /* DEVICE_TX_STUCK_DETECT */
294 	{"idma_enable",   IOV_IDMA_ENABLE,    0, 0,  IOVT_UINT32,    0 },
295 	{"ifrm_enable",   IOV_IFRM_ENABLE,    0, 0,  IOVT_UINT32,    0 },
296 	{"clear_ring",   IOV_CLEAR_RING,    0, 0,  IOVT_UINT32,    0 },
297 #ifdef DHD_EFI
298 	{"properties", IOV_WIFI_PROPERTIES,	0, 0, IOVT_BUFFER, 0},
299 	{"otp_dump", IOV_OTP_DUMP,	0, 0, IOVT_BUFFER, 0},
300 #endif
301 	{NULL, 0, 0, 0, 0, 0 }
302 };
303 
304 
305 #define MAX_READ_TIMEOUT	5 * 1000 * 1000
306 
307 #ifndef DHD_RXBOUND
308 #define DHD_RXBOUND		64
309 #endif
310 #ifndef DHD_TXBOUND
311 #define DHD_TXBOUND		64
312 #endif
313 
314 #define DHD_INFORING_BOUND	32
315 
316 uint dhd_rxbound = DHD_RXBOUND;
317 uint dhd_txbound = DHD_TXBOUND;
318 
319 /**
320  * Register/Unregister functions are called by the main DHD entry point (eg module insertion) to
321  * link with the bus driver, in order to look for or await the device.
322  */
323 int
dhd_bus_register(void)324 dhd_bus_register(void)
325 {
326 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
327 
328 	return dhdpcie_bus_register();
329 }
330 
331 void
dhd_bus_unregister(void)332 dhd_bus_unregister(void)
333 {
334 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
335 
336 	dhdpcie_bus_unregister();
337 	return;
338 }
339 
340 
341 /** returns a host virtual address */
342 uint32 *
dhdpcie_bus_reg_map(osl_t * osh,ulong addr,int size)343 dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
344 {
345 	return (uint32 *)REG_MAP(addr, size);
346 }
347 
348 void
dhdpcie_bus_reg_unmap(osl_t * osh,volatile char * addr,int size)349 dhdpcie_bus_reg_unmap(osl_t *osh, volatile char *addr, int size)
350 {
351 	REG_UNMAP(addr);
352 	return;
353 }
354 
355 /**
356  * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
357  * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
358  * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
359  *
360  * 'tcm' is the *host* virtual address at which tcm is mapped.
361  */
dhdpcie_bus_attach(osl_t * osh,volatile char * regs,volatile char * tcm,void * pci_dev)362 dhd_bus_t* dhdpcie_bus_attach(osl_t *osh,
363 	volatile char *regs, volatile char *tcm, void *pci_dev)
364 {
365 	dhd_bus_t *bus;
366 
367 	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
368 
369 	do {
370 		if (!(bus = MALLOCZ(osh, sizeof(dhd_bus_t)))) {
371 			DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
372 			break;
373 		}
374 
375 		bus->regs = regs;
376 		bus->tcm = tcm;
377 		bus->osh = osh;
378 		/* Save pci_dev into dhd_bus, as it may be needed in dhd_attach */
379 		bus->dev = (struct pci_dev *)pci_dev;
380 
381 
382 		dll_init(&bus->flowring_active_list);
383 #ifdef IDLE_TX_FLOW_MGMT
384 		bus->active_list_last_process_ts = OSL_SYSUPTIME();
385 #endif /* IDLE_TX_FLOW_MGMT */
386 
387 #ifdef DEVICE_TX_STUCK_DETECT
388 		/* Enable the Device stuck detection feature by default */
389 		bus->dev_tx_stuck_monitor = TRUE;
390 		bus->device_tx_stuck_check = OSL_SYSUPTIME();
391 #endif /* DEVICE_TX_STUCK_DETECT */
392 
393 		/* Attach pcie shared structure */
394 		if (!(bus->pcie_sh = MALLOCZ(osh, sizeof(pciedev_shared_t)))) {
395 			DHD_ERROR(("%s: MALLOC of bus->pcie_sh failed\n", __FUNCTION__));
396 			break;
397 		}
398 
399 		/* dhd_common_init(osh); */
400 
401 		if (dhdpcie_dongle_attach(bus)) {
402 			DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
403 			break;
404 		}
405 
406 		/* software resources */
407 		if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
408 			DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
409 
410 			break;
411 		}
412 		bus->dhd->busstate = DHD_BUS_DOWN;
413 		bus->db1_for_mb = TRUE;
414 		bus->dhd->hang_report = TRUE;
415 		bus->use_mailbox = FALSE;
416 		bus->use_d0_inform = FALSE;
417 #ifdef IDLE_TX_FLOW_MGMT
418 		bus->enable_idle_flowring_mgmt = FALSE;
419 #endif /* IDLE_TX_FLOW_MGMT */
420 		bus->irq_registered = FALSE;
421 
422 		DHD_TRACE(("%s: EXIT SUCCESS\n",
423 			__FUNCTION__));
424 #ifdef DHD_FW_COREDUMP
425 		g_dhd_bus = bus;
426 #endif
427 		return bus;
428 	} while (0);
429 
430 	DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
431 
432 	if (bus && bus->pcie_sh) {
433 		MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
434 	}
435 
436 	if (bus) {
437 		MFREE(osh, bus, sizeof(dhd_bus_t));
438 	}
439 	return NULL;
440 }
441 
442 uint
dhd_bus_chip(struct dhd_bus * bus)443 dhd_bus_chip(struct dhd_bus *bus)
444 {
445 	ASSERT(bus->sih != NULL);
446 	return bus->sih->chip;
447 }
448 
449 uint
dhd_bus_chiprev(struct dhd_bus * bus)450 dhd_bus_chiprev(struct dhd_bus *bus)
451 {
452 	ASSERT(bus);
453 	ASSERT(bus->sih != NULL);
454 	return bus->sih->chiprev;
455 }
456 
457 void *
dhd_bus_pub(struct dhd_bus * bus)458 dhd_bus_pub(struct dhd_bus *bus)
459 {
460 	return bus->dhd;
461 }
462 
463 const void *
dhd_bus_sih(struct dhd_bus * bus)464 dhd_bus_sih(struct dhd_bus *bus)
465 {
466 	return (const void *)bus->sih;
467 }
468 
469 void *
dhd_bus_txq(struct dhd_bus * bus)470 dhd_bus_txq(struct dhd_bus *bus)
471 {
472 	return &bus->txq;
473 }
474 
475 /** Get Chip ID version */
dhd_bus_chip_id(dhd_pub_t * dhdp)476 uint dhd_bus_chip_id(dhd_pub_t *dhdp)
477 {
478 	dhd_bus_t *bus = dhdp->bus;
479 	return  bus->sih->chip;
480 }
481 
482 /** Get Chip Rev ID version */
dhd_bus_chiprev_id(dhd_pub_t * dhdp)483 uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
484 {
485 	dhd_bus_t *bus = dhdp->bus;
486 	return bus->sih->chiprev;
487 }
488 
489 /** Get Chip Pkg ID version */
dhd_bus_chippkg_id(dhd_pub_t * dhdp)490 uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
491 {
492 	dhd_bus_t *bus = dhdp->bus;
493 	return bus->sih->chippkg;
494 }
495 
496 /** Read and clear intstatus. This should be called with interrupts disabled or inside isr */
497 uint32
dhdpcie_bus_intstatus(dhd_bus_t * bus)498 dhdpcie_bus_intstatus(dhd_bus_t *bus)
499 {
500 	uint32 intstatus = 0;
501 #ifndef DHD_READ_INTSTATUS_IN_DPC
502 	uint32 intmask = 0;
503 #endif /* DHD_READ_INTSTATUS_IN_DPC */
504 
505 	if ((bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) &&
506 		bus->wait_for_d3_ack) {
507 #ifdef DHD_EFI
508 		DHD_INFO(("%s: trying to clear intstatus during suspend (%d)"
509 			" or suspend in progress %d\n",
510 			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
511 #else
512 		DHD_ERROR(("%s: trying to clear intstatus during suspend (%d)"
513 			" or suspend in progress %d\n",
514 			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
515 #endif /* !DHD_EFI */
516 		return intstatus;
517 	}
518 	if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
519 		(bus->sih->buscorerev == 2)) {
520 		intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
521 		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
522 		intstatus &= I_MB;
523 	} else {
524 		/* this is a PCIE core register..not a config register... */
525 		intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
526 
527 #ifndef DHD_READ_INTSTATUS_IN_DPC
528 		/* this is a PCIE core register..not a config register... */
529 		intmask = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
530 
531 		intstatus &= intmask;
532 #endif /* DHD_READ_INTSTATUS_IN_DPC */
533 		/* Is device removed. intstatus & intmask read 0xffffffff */
534 		if (intstatus == (uint32)-1) {
535 			DHD_ERROR(("%s: Device is removed or Link is down.\n", __FUNCTION__));
536 #ifdef CUSTOMER_HW4_DEBUG
537 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
538 			bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
539 			dhd_os_send_hang_message(bus->dhd);
540 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
541 #endif /* CUSTOMER_HW4_DEBUG */
542 			return intstatus;
543 		}
544 
545 
546 		/*
547 		 * The fourth argument to si_corereg is the "mask" fields of the register to update
548 		 * and the fifth field is the "value" to update. Now if we are interested in only
549 		 * few fields of the "mask" bit map, we should not be writing back what we read
550 		 * By doing so, we might clear/ack interrupts that are not handled yet.
551 		 */
552 		si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, bus->def_intmask,
553 			intstatus);
554 
555 		intstatus &= bus->def_intmask;
556 	}
557 
558 	return intstatus;
559 }
560 
561 /**
562  * Name:  dhdpcie_bus_isr
563  * Parameters:
564  * 1: IN int irq   -- interrupt vector
565  * 2: IN void *arg      -- handle to private data structure
566  * Return value:
567  * Status (TRUE or FALSE)
568  *
569  * Description:
570  * Interrupt Service routine checks for the status register,
571  * disable interrupt and queue DPC if mail box interrupts are raised.
572  */
573 int32
dhdpcie_bus_isr(dhd_bus_t * bus)574 dhdpcie_bus_isr(dhd_bus_t *bus)
575 {
576 	uint32 intstatus = 0;
577 
578 	do {
579 		DHD_TRACE(("%s: Enter\n", __FUNCTION__));
580 		/* verify argument */
581 		if (!bus) {
582 			DHD_ERROR(("%s : bus is null pointer, exit \n", __FUNCTION__));
583 			break;
584 		}
585 
586 		if (bus->dhd->dongle_reset) {
587 			break;
588 		}
589 
590 		if (bus->dhd->busstate == DHD_BUS_DOWN) {
591 			break;
592 		}
593 
594 
595 		if (PCIECTO_ENAB(bus->dhd)) {
596 			/* read pci_intstatus */
597 			intstatus = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_STATUS, 4);
598 
599 			if (intstatus & PCI_CTO_INT_MASK) {
600 				/* reset backplane and cto,
601 				 *  then access through pcie is recovered.
602 				 */
603 				dhdpcie_cto_error_recovery(bus);
604 				return TRUE;
605 			}
606 		}
607 
608 #ifndef DHD_READ_INTSTATUS_IN_DPC
609 		intstatus = dhdpcie_bus_intstatus(bus);
610 
611 		/* Check if the interrupt is ours or not */
612 		if (intstatus == 0) {
613 			break;
614 		}
615 
616 		/* save the intstatus */
617 		/* read interrupt status register!! Status bits will be cleared in DPC !! */
618 		bus->intstatus = intstatus;
619 
620 		/* return error for 0xFFFFFFFF */
621 		if (intstatus == (uint32)-1) {
622 			dhdpcie_disable_irq_nosync(bus);
623 			bus->is_linkdown = TRUE;
624 			return BCME_ERROR;
625 		}
626 
627 		/*  Overall operation:
628 		 *    - Mask further interrupts
629 		 *    - Read/ack intstatus
630 		 *    - Take action based on bits and state
631 		 *    - Reenable interrupts (as per state)
632 		 */
633 
634 		/* Count the interrupt call */
635 		bus->intrcount++;
636 #endif /* DHD_READ_INTSTATUS_IN_DPC */
637 
638 		bus->ipend = TRUE;
639 
640 		bus->isr_intr_disable_count++;
641 		dhdpcie_bus_intr_disable(bus); /* Disable interrupt using IntMask!! */
642 
643 		bus->intdis = TRUE;
644 
645 #if defined(PCIE_ISR_THREAD)
646 
647 		DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
648 		DHD_OS_WAKE_LOCK(bus->dhd);
649 		while (dhd_bus_dpc(bus));
650 		DHD_OS_WAKE_UNLOCK(bus->dhd);
651 #else
652 		bus->dpc_sched = TRUE;
653 		dhd_sched_dpc(bus->dhd);     /* queue DPC now!! */
654 #endif /* defined(SDIO_ISR_THREAD) */
655 
656 		DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__));
657 		return TRUE;
658 
659 	} while (0);
660 
661 	DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__));
662 	return FALSE;
663 }
664 
665 int
dhdpcie_set_pwr_state(dhd_bus_t * bus,uint state)666 dhdpcie_set_pwr_state(dhd_bus_t *bus, uint state)
667 {
668 	uint32 cur_state = 0;
669 	uint32 pm_csr = 0;
670 	osl_t *osh = bus->osh;
671 
672 	pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
673 	cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
674 
675 	if (cur_state == state) {
676 		DHD_ERROR(("%s: Already in state %u \n", __FUNCTION__, cur_state));
677 		return BCME_OK;
678 	}
679 
680 	if (state > PCIECFGREG_PM_CSR_STATE_D3_HOT)
681 		return BCME_ERROR;
682 
683 	/* Validate the state transition
684 	* if already in a lower power state, return error
685 	*/
686 	if (state != PCIECFGREG_PM_CSR_STATE_D0 &&
687 			cur_state <= PCIECFGREG_PM_CSR_STATE_D3_COLD &&
688 			cur_state > state) {
689 		DHD_ERROR(("%s: Invalid power state transition !\n", __FUNCTION__));
690 		return BCME_ERROR;
691 	}
692 
693 	pm_csr &= ~PCIECFGREG_PM_CSR_STATE_MASK;
694 	pm_csr |= state;
695 
696 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32), pm_csr);
697 
698 	/* need to wait for the specified mandatory pcie power transition delay time */
699 	if (state == PCIECFGREG_PM_CSR_STATE_D3_HOT ||
700 			cur_state == PCIECFGREG_PM_CSR_STATE_D3_HOT)
701 			OSL_DELAY(DHDPCIE_PM_D3_DELAY);
702 	else if (state == PCIECFGREG_PM_CSR_STATE_D2 ||
703 			cur_state == PCIECFGREG_PM_CSR_STATE_D2)
704 			OSL_DELAY(DHDPCIE_PM_D2_DELAY);
705 
706 	/* read back the power state and verify */
707 	pm_csr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
708 	cur_state = pm_csr & PCIECFGREG_PM_CSR_STATE_MASK;
709 	if (cur_state != state) {
710 		DHD_ERROR(("%s: power transition failed ! Current state is %u \n",
711 				__FUNCTION__, cur_state));
712 		return BCME_ERROR;
713 	} else {
714 		DHD_ERROR(("%s: power transition to %u success \n",
715 				__FUNCTION__, cur_state));
716 	}
717 
718 	return BCME_OK;
719 
720 }
721 
722 int
dhdpcie_config_check(dhd_bus_t * bus)723 dhdpcie_config_check(dhd_bus_t *bus)
724 {
725 	uint32 i, val;
726 	int ret = BCME_ERROR;
727 
728 	for (i = 0; i < DHDPCIE_CONFIG_CHECK_RETRY_COUNT; i++) {
729 		val = OSL_PCI_READ_CONFIG(bus->osh, PCI_CFG_VID, sizeof(uint32));
730 		if ((val & 0xFFFF) == VENDOR_BROADCOM) {
731 			ret = BCME_OK;
732 			break;
733 		}
734 		OSL_DELAY(DHDPCIE_CONFIG_CHECK_DELAY_MS * 1000);
735 	}
736 
737 	return ret;
738 }
739 
740 int
dhdpcie_config_restore(dhd_bus_t * bus,bool restore_pmcsr)741 dhdpcie_config_restore(dhd_bus_t *bus,  bool restore_pmcsr)
742 {
743 	uint32 i;
744 	osl_t *osh = bus->osh;
745 
746 	if (BCME_OK != dhdpcie_config_check(bus)) {
747 		return BCME_ERROR;
748 	}
749 
750 	for (i = PCI_CFG_REV >> 2; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
751 		OSL_PCI_WRITE_CONFIG(osh, i << 2, sizeof(uint32), bus->saved_config.header[i]);
752 	}
753 	OSL_PCI_WRITE_CONFIG(osh, PCI_CFG_CMD, sizeof(uint32), bus->saved_config.header[1]);
754 
755 	if (restore_pmcsr)
756 		OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PM_CSR,
757 				sizeof(uint32), bus->saved_config.pmcsr);
758 
759 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_CAP, sizeof(uint32), bus->saved_config.msi_cap);
760 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_L, sizeof(uint32),
761 			bus->saved_config.msi_addr0);
762 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
763 			sizeof(uint32), bus->saved_config.msi_addr1);
764 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_MSI_DATA,
765 			sizeof(uint32), bus->saved_config.msi_data);
766 
767 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_DEV_STATUS_CTRL,
768 			sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat);
769 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGGEN_DEV_STATUS_CTRL2,
770 			sizeof(uint32), bus->saved_config.exp_dev_ctrl_stat2);
771 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL,
772 			sizeof(uint32), bus->saved_config.exp_link_ctrl_stat);
773 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_LINK_STATUS_CTRL2,
774 			sizeof(uint32), bus->saved_config.exp_link_ctrl_stat2);
775 
776 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
777 			sizeof(uint32), bus->saved_config.l1pm0);
778 	OSL_PCI_WRITE_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
779 			sizeof(uint32), bus->saved_config.l1pm1);
780 
781 	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN,
782 			sizeof(uint32), bus->saved_config.bar0_win);
783 	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR1_WIN,
784 			sizeof(uint32), bus->saved_config.bar1_win);
785 
786 	return BCME_OK;
787 }
788 
789 int
dhdpcie_config_save(dhd_bus_t * bus)790 dhdpcie_config_save(dhd_bus_t *bus)
791 {
792 	uint32 i;
793 	osl_t *osh = bus->osh;
794 
795 	if (BCME_OK != dhdpcie_config_check(bus)) {
796 		return BCME_ERROR;
797 	}
798 
799 	for (i = 0; i < DHDPCIE_CONFIG_HDR_SIZE; i++) {
800 		bus->saved_config.header[i] = OSL_PCI_READ_CONFIG(osh, i << 2, sizeof(uint32));
801 	}
802 
803 	bus->saved_config.pmcsr = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PM_CSR, sizeof(uint32));
804 
805 	bus->saved_config.msi_cap = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_CAP,
806 			sizeof(uint32));
807 	bus->saved_config.msi_addr0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_L,
808 			sizeof(uint32));
809 	bus->saved_config.msi_addr1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_ADDR_H,
810 			sizeof(uint32));
811 	bus->saved_config.msi_data = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_MSI_DATA,
812 			sizeof(uint32));
813 
814 	bus->saved_config.exp_dev_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
815 			PCIECFGREG_DEV_STATUS_CTRL, sizeof(uint32));
816 	bus->saved_config.exp_dev_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
817 			PCIECFGGEN_DEV_STATUS_CTRL2, sizeof(uint32));
818 	bus->saved_config.exp_link_ctrl_stat = OSL_PCI_READ_CONFIG(osh,
819 			PCIECFGREG_LINK_STATUS_CTRL, sizeof(uint32));
820 	bus->saved_config.exp_link_ctrl_stat2 = OSL_PCI_READ_CONFIG(osh,
821 			PCIECFGREG_LINK_STATUS_CTRL2, sizeof(uint32));
822 
823 	bus->saved_config.l1pm0 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL1,
824 			sizeof(uint32));
825 	bus->saved_config.l1pm1 = OSL_PCI_READ_CONFIG(osh, PCIECFGREG_PML1_SUB_CTRL2,
826 			sizeof(uint32));
827 
828 	bus->saved_config.bar0_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR0_WIN,
829 			sizeof(uint32));
830 	bus->saved_config.bar1_win = OSL_PCI_READ_CONFIG(osh, PCI_BAR1_WIN,
831 			sizeof(uint32));
832 	return BCME_OK;
833 }
834 
835 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
836 dhd_pub_t *link_recovery = NULL;
837 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
838 static bool
dhdpcie_dongle_attach(dhd_bus_t * bus)839 dhdpcie_dongle_attach(dhd_bus_t *bus)
840 {
841 
842 	osl_t *osh = bus->osh;
843 	volatile void *regsva = (volatile void*)bus->regs;
844 	uint16 devid = bus->cl_devid;
845 	uint32 val;
846 	sbpcieregs_t *sbpcieregs;
847 
848 	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
849 
850 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
851 	link_recovery = bus->dhd;
852 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
853 
854 	bus->alp_only = TRUE;
855 	bus->sih = NULL;
856 
857 	/* Set bar0 window to si_enum_base */
858 	dhdpcie_bus_cfg_set_bar0_win(bus, SI_ENUM_BASE);
859 
860 	/* Checking PCIe bus status with reading configuration space */
861 	val = OSL_PCI_READ_CONFIG(osh, PCI_CFG_VID, sizeof(uint32));
862 	if ((val & 0xFFFF) != VENDOR_BROADCOM) {
863 		DHD_ERROR(("%s : failed to read PCI configuration space!\n", __FUNCTION__));
864 		goto fail;
865 	}
866 
867 	/*
868 	 * Checking PCI_SPROM_CONTROL register for preventing invalid address access
869 	 * due to switch address space from PCI_BUS to SI_BUS.
870 	 */
871 	val = OSL_PCI_READ_CONFIG(osh, PCI_SPROM_CONTROL, sizeof(uint32));
872 	if (val == 0xffffffff) {
873 		DHD_ERROR(("%s : failed to read SPROM control register\n", __FUNCTION__));
874 		goto fail;
875 	}
876 
877 #ifdef DHD_EFI
878 	/* Save good copy of PCIe config space */
879 	if (BCME_OK != dhdpcie_config_save(bus)) {
880 		DHD_ERROR(("%s : failed to save PCI configuration space!\n", __FUNCTION__));
881 		goto fail;
882 	}
883 #endif /* DHD_EFI */
884 
885 	/* si_attach() will provide an SI handle and scan the backplane */
886 	if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
887 	                           &bus->vars, &bus->varsz))) {
888 		DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
889 		goto fail;
890 	}
891 
892 	/* Olympic EFI requirement - stop driver load if FW is already running
893 	*  need to do this here before pcie_watchdog_reset, because
894 	*  pcie_watchdog_reset will put the ARM back into halt state
895 	*/
896 	if (!dhdpcie_is_arm_halted(bus)) {
897 		DHD_ERROR(("%s: ARM is not halted,FW is already running! Abort.\n",
898 				__FUNCTION__));
899 		goto fail;
900 	}
901 
902 	/* Enable CLKREQ# */
903 	dhdpcie_clkreq(bus->osh, 1, 1);
904 
905 #ifndef DONGLE_ENABLE_ISOLATION
906 	/*
907 	 * Issue CC watchdog to reset all the cores on the chip - similar to rmmod dhd
908 	 * This is required to avoid spurious interrupts to the Host and bring back
909 	 * dongle to a sane state (on host soft-reboot / watchdog-reboot).
910 	 */
911 	pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *) bus->regs);
912 #endif /* !DONGLE_ENABLE_ISOLATION */
913 
914 #ifdef DHD_EFI
915 	dhdpcie_dongle_pwr_toggle(bus);
916 #endif
917 
918 	si_setcore(bus->sih, PCIE2_CORE_ID, 0);
919 	sbpcieregs = (sbpcieregs_t*)(bus->regs);
920 
921 	/* WAR where the BAR1 window may not be sized properly */
922 	W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
923 	val = R_REG(osh, &sbpcieregs->configdata);
924 	W_REG(osh, &sbpcieregs->configdata, val);
925 
926 	/* Get info on the ARM and SOCRAM cores... */
927 	/* Should really be qualified by device id */
928 	if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
929 	    (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
930 	    (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) ||
931 	    (si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
932 		bus->armrev = si_corerev(bus->sih);
933 	} else {
934 		DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
935 		goto fail;
936 	}
937 
938 	if (si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
939 		/* Only set dongle RAMSIZE to default value when ramsize is not adjusted */
940 		if (!bus->ramsize_adjusted) {
941 			if (!(bus->orig_ramsize = si_sysmem_size(bus->sih))) {
942 				DHD_ERROR(("%s: failed to find SYSMEM memory!\n", __FUNCTION__));
943 				goto fail;
944 			}
945 			/* also populate base address */
946 			bus->dongle_ram_base = CA7_4365_RAM_BASE;
947 			/* Default reserve 1.75MB for CA7 */
948 			bus->orig_ramsize = 0x1c0000;
949 		}
950 	} else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
951 		if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
952 			DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
953 			goto fail;
954 		}
955 	} else {
956 		/* cr4 has a different way to find the RAM size from TCM's */
957 		if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
958 			DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
959 			goto fail;
960 		}
961 		/* also populate base address */
962 		switch ((uint16)bus->sih->chip) {
963 		case BCM4339_CHIP_ID:
964 		case BCM4335_CHIP_ID:
965 			bus->dongle_ram_base = CR4_4335_RAM_BASE;
966 			break;
967 		case BCM4358_CHIP_ID:
968 		case BCM4354_CHIP_ID:
969 		case BCM43567_CHIP_ID:
970 		case BCM43569_CHIP_ID:
971 		case BCM4350_CHIP_ID:
972 		case BCM43570_CHIP_ID:
973 			bus->dongle_ram_base = CR4_4350_RAM_BASE;
974 			break;
975 		case BCM4360_CHIP_ID:
976 			bus->dongle_ram_base = CR4_4360_RAM_BASE;
977 			break;
978 
979 		case BCM4364_CHIP_ID:
980 			bus->dongle_ram_base = CR4_4364_RAM_BASE;
981 			break;
982 
983 		CASE_BCM4345_CHIP:
984 			bus->dongle_ram_base = (bus->sih->chiprev < 6)  /* changed at 4345C0 */
985 				? CR4_4345_LT_C0_RAM_BASE : CR4_4345_GE_C0_RAM_BASE;
986 			break;
987 		CASE_BCM43602_CHIP:
988 			bus->dongle_ram_base = CR4_43602_RAM_BASE;
989 			break;
990 		case BCM4349_CHIP_GRPID:
991 			/* RAM based changed from 4349c0(revid=9) onwards */
992 			bus->dongle_ram_base = ((bus->sih->chiprev < 9) ?
993 				CR4_4349_RAM_BASE : CR4_4349_RAM_BASE_FROM_REV_9);
994 			break;
995 		case BCM4347_CHIP_GRPID:
996 			bus->dongle_ram_base = CR4_4347_RAM_BASE;
997 			break;
998 		case BCM4362_CHIP_ID:
999 			bus->dongle_ram_base = CR4_4362_RAM_BASE;
1000 			break;
1001 		default:
1002 			bus->dongle_ram_base = 0;
1003 			DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
1004 			           __FUNCTION__, bus->dongle_ram_base));
1005 		}
1006 	}
1007 	bus->ramsize = bus->orig_ramsize;
1008 	if (dhd_dongle_memsize)
1009 		dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize);
1010 
1011 	DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
1012 	           bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
1013 
1014 	bus->srmemsize = si_socram_srmem_size(bus->sih);
1015 
1016 
1017 	bus->def_intmask = PCIE_MB_D2H_MB_MASK | PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
1018 
1019 	/* Set the poll and/or interrupt flags */
1020 	bus->intr = (bool)dhd_intr;
1021 	if ((bus->poll = (bool)dhd_poll))
1022 		bus->pollrate = 1;
1023 
1024 	bus->wait_for_d3_ack = 1;
1025 #ifdef PCIE_OOB
1026 	dhdpcie_oob_init(bus);
1027 #endif /* PCIE_OOB */
1028 #ifdef PCIE_INB_DW
1029 	bus->inb_enabled = TRUE;
1030 #endif /* PCIE_INB_DW */
1031 	bus->dongle_in_ds = FALSE;
1032 	bus->idma_enabled = TRUE;
1033 	bus->ifrm_enabled = TRUE;
1034 #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
1035 	bus->ds_enabled = TRUE;
1036 #endif
1037 	DHD_TRACE(("%s: EXIT: SUCCESS\n", __FUNCTION__));
1038 	return 0;
1039 
1040 fail:
1041 	if (bus->sih != NULL) {
1042 		si_detach(bus->sih);
1043 		bus->sih = NULL;
1044 	}
1045 	DHD_TRACE(("%s: EXIT: FAILURE\n", __FUNCTION__));
1046 	return -1;
1047 }
1048 
1049 int
dhpcie_bus_unmask_interrupt(dhd_bus_t * bus)1050 dhpcie_bus_unmask_interrupt(dhd_bus_t *bus)
1051 {
1052 	dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB);
1053 	return 0;
1054 }
1055 int
dhpcie_bus_mask_interrupt(dhd_bus_t * bus)1056 dhpcie_bus_mask_interrupt(dhd_bus_t *bus)
1057 {
1058 	dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0);
1059 	return 0;
1060 }
1061 
1062 void
dhdpcie_bus_intr_enable(dhd_bus_t * bus)1063 dhdpcie_bus_intr_enable(dhd_bus_t *bus)
1064 {
1065 	DHD_TRACE(("%s Enter\n", __FUNCTION__));
1066 	if (bus && bus->sih && !bus->is_linkdown) {
1067 		if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
1068 			(bus->sih->buscorerev == 4)) {
1069 			dhpcie_bus_unmask_interrupt(bus);
1070 		} else {
1071 			/* Skip after recieving D3 ACK */
1072 			if ((bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) &&
1073 				bus->wait_for_d3_ack) {
1074 				return;
1075 			}
1076 			si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
1077 				bus->def_intmask, bus->def_intmask);
1078 		}
1079 	}
1080 	DHD_TRACE(("%s Exit\n", __FUNCTION__));
1081 }
1082 
1083 void
dhdpcie_bus_intr_disable(dhd_bus_t * bus)1084 dhdpcie_bus_intr_disable(dhd_bus_t *bus)
1085 {
1086 	DHD_TRACE(("%s Enter\n", __FUNCTION__));
1087 	if (bus && bus->sih && !bus->is_linkdown) {
1088 		if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
1089 			(bus->sih->buscorerev == 4)) {
1090 			dhpcie_bus_mask_interrupt(bus);
1091 		} else {
1092 			/* Skip after recieving D3 ACK */
1093 			if ((bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) &&
1094 				bus->wait_for_d3_ack) {
1095 				return;
1096 			}
1097 			si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
1098 				bus->def_intmask, 0);
1099 		}
1100 	}
1101 	DHD_TRACE(("%s Exit\n", __FUNCTION__));
1102 }
1103 
1104 /*
1105  *  dhdpcie_advertise_bus_cleanup advertises that clean up is under progress
1106  * to other bus user contexts like Tx, Rx, IOVAR, WD etc and it waits for other contexts
1107  * to gracefully exit. All the bus usage contexts before marking busstate as busy, will check for
1108  * whether the busstate is DHD_BUS_DOWN or DHD_BUS_DOWN_IN_PROGRESS, if so
1109  * they will exit from there itself without marking dhd_bus_busy_state as BUSY.
1110  */
1111 static void
dhdpcie_advertise_bus_cleanup(dhd_pub_t * dhdp)1112 dhdpcie_advertise_bus_cleanup(dhd_pub_t	 *dhdp)
1113 {
1114 	unsigned long flags;
1115 	int timeleft;
1116 
1117 	DHD_GENERAL_LOCK(dhdp, flags);
1118 	dhdp->busstate = DHD_BUS_DOWN_IN_PROGRESS;
1119 	DHD_GENERAL_UNLOCK(dhdp, flags);
1120 
1121 	timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
1122 	if ((timeleft == 0) || (timeleft == 1)) {
1123 		DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
1124 				__FUNCTION__, dhdp->dhd_bus_busy_state));
1125 		ASSERT(0);
1126 	}
1127 
1128 	return;
1129 }
1130 
1131 static void
dhdpcie_advertise_bus_remove(dhd_pub_t * dhdp)1132 dhdpcie_advertise_bus_remove(dhd_pub_t	 *dhdp)
1133 {
1134 	unsigned long flags;
1135 	int timeleft;
1136 
1137 	DHD_GENERAL_LOCK(dhdp, flags);
1138 	dhdp->busstate = DHD_BUS_REMOVE;
1139 	DHD_GENERAL_UNLOCK(dhdp, flags);
1140 
1141 	timeleft = dhd_os_busbusy_wait_negation(dhdp, &dhdp->dhd_bus_busy_state);
1142 	if ((timeleft == 0) || (timeleft == 1)) {
1143 		DHD_ERROR(("%s : Timeout due to dhd_bus_busy_state=0x%x\n",
1144 				__FUNCTION__, dhdp->dhd_bus_busy_state));
1145 		ASSERT(0);
1146 	}
1147 
1148 	return;
1149 }
1150 
1151 
1152 static void
dhdpcie_bus_remove_prep(dhd_bus_t * bus)1153 dhdpcie_bus_remove_prep(dhd_bus_t *bus)
1154 {
1155 	unsigned long flags;
1156 	DHD_TRACE(("%s Enter\n", __FUNCTION__));
1157 
1158 	DHD_GENERAL_LOCK(bus->dhd, flags);
1159 	bus->dhd->busstate = DHD_BUS_DOWN;
1160 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
1161 
1162 #ifdef PCIE_INB_DW
1163 	/* De-Initialize the lock to serialize Device Wake Inband activities */
1164 	if (bus->inb_lock) {
1165 		dhd_os_spin_lock_deinit(bus->dhd->osh, bus->inb_lock);
1166 		bus->inb_lock = NULL;
1167 	}
1168 #endif
1169 
1170 
1171 	dhd_os_sdlock(bus->dhd);
1172 
1173 	if (bus->sih && !bus->dhd->dongle_isolation) {
1174 		/* Has insmod fails after rmmod issue in Brix Android */
1175 		/* if the pcie link is down, watchdog reset should not be done, as it may hang */
1176 		if (!bus->is_linkdown)
1177 			pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *) bus->regs);
1178 		else
1179 			DHD_ERROR(("%s: skipping watchdog reset, due to pcie link down ! \n",
1180 					__FUNCTION__));
1181 
1182 		bus->dhd->is_pcie_watchdog_reset = TRUE;
1183 	}
1184 
1185 	dhd_os_sdunlock(bus->dhd);
1186 
1187 	DHD_TRACE(("%s Exit\n", __FUNCTION__));
1188 }
1189 
1190 /** Detach and free everything */
1191 void
dhdpcie_bus_release(dhd_bus_t * bus)1192 dhdpcie_bus_release(dhd_bus_t *bus)
1193 {
1194 	bool dongle_isolation = FALSE;
1195 	osl_t *osh = NULL;
1196 
1197 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1198 
1199 	if (bus) {
1200 
1201 		osh = bus->osh;
1202 		ASSERT(osh);
1203 
1204 		if (bus->dhd) {
1205 			dhdpcie_advertise_bus_remove(bus->dhd);
1206 			dongle_isolation = bus->dhd->dongle_isolation;
1207 			bus->dhd->is_pcie_watchdog_reset = FALSE;
1208 			dhdpcie_bus_remove_prep(bus);
1209 
1210 			if (bus->intr) {
1211 				dhdpcie_bus_intr_disable(bus);
1212 				dhdpcie_free_irq(bus);
1213 			}
1214 			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
1215 			dhd_detach(bus->dhd);
1216 			dhd_free(bus->dhd);
1217 			bus->dhd = NULL;
1218 		}
1219 
1220 		/* unmap the regs and tcm here!! */
1221 		if (bus->regs) {
1222 			dhdpcie_bus_reg_unmap(osh, bus->regs, DONGLE_REG_MAP_SIZE);
1223 			bus->regs = NULL;
1224 		}
1225 		if (bus->tcm) {
1226 			dhdpcie_bus_reg_unmap(osh, bus->tcm, DONGLE_TCM_MAP_SIZE);
1227 			bus->tcm = NULL;
1228 		}
1229 
1230 		dhdpcie_bus_release_malloc(bus, osh);
1231 		/* Detach pcie shared structure */
1232 		if (bus->pcie_sh) {
1233 			MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
1234 			bus->pcie_sh = NULL;
1235 		}
1236 
1237 		if (bus->console.buf != NULL) {
1238 			MFREE(osh, bus->console.buf, bus->console.bufsize);
1239 		}
1240 
1241 
1242 		/* Finally free bus info */
1243 		MFREE(osh, bus, sizeof(dhd_bus_t));
1244 
1245 	}
1246 
1247 	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
1248 } /* dhdpcie_bus_release */
1249 
1250 
1251 void
dhdpcie_bus_release_dongle(dhd_bus_t * bus,osl_t * osh,bool dongle_isolation,bool reset_flag)1252 dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
1253 {
1254 	DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
1255 		bus->dhd, bus->dhd->dongle_reset));
1256 
1257 	if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) {
1258 		DHD_TRACE(("%s Exit\n", __FUNCTION__));
1259 		return;
1260 	}
1261 
1262 	if (bus->sih) {
1263 
1264 		if (!dongle_isolation &&
1265 		(bus->dhd && !bus->dhd->is_pcie_watchdog_reset))
1266 			pcie_watchdog_reset(bus->osh, bus->sih,
1267 				(sbpcieregs_t *) bus->regs);
1268 #ifdef DHD_EFI
1269 		dhdpcie_dongle_pwr_toggle(bus);
1270 #endif
1271 		if (bus->ltrsleep_on_unload) {
1272 			si_corereg(bus->sih, bus->sih->buscoreidx,
1273 				OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
1274 		}
1275 
1276 		if (bus->sih->buscorerev == 13)
1277 			 pcie_serdes_iddqdisable(bus->osh, bus->sih,
1278 			                         (sbpcieregs_t *) bus->regs);
1279 
1280 		/* Disable CLKREQ# */
1281 		dhdpcie_clkreq(bus->osh, 1, 0);
1282 
1283 		if (bus->sih != NULL) {
1284 			si_detach(bus->sih);
1285 			bus->sih = NULL;
1286 		}
1287 		if (bus->vars && bus->varsz)
1288 			MFREE(osh, bus->vars, bus->varsz);
1289 		bus->vars = NULL;
1290 	}
1291 
1292 	DHD_TRACE(("%s Exit\n", __FUNCTION__));
1293 }
1294 
1295 uint32
dhdpcie_bus_cfg_read_dword(dhd_bus_t * bus,uint32 addr,uint32 size)1296 dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size)
1297 {
1298 	uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size);
1299 	return data;
1300 }
1301 
1302 /** 32 bit config write */
1303 void
dhdpcie_bus_cfg_write_dword(dhd_bus_t * bus,uint32 addr,uint32 size,uint32 data)1304 dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data)
1305 {
1306 	OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data);
1307 }
1308 
1309 void
dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t * bus,uint32 data)1310 dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data)
1311 {
1312 	OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data);
1313 }
1314 
1315 void
dhdpcie_bus_dongle_setmemsize(struct dhd_bus * bus,int mem_size)1316 dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
1317 {
1318 	int32 min_size =  DONGLE_MIN_MEMSIZE;
1319 	/* Restrict the memsize to user specified limit */
1320 	DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
1321 		dhd_dongle_memsize, min_size));
1322 	if ((dhd_dongle_memsize > min_size) &&
1323 		(dhd_dongle_memsize < (int32)bus->orig_ramsize))
1324 		bus->ramsize = dhd_dongle_memsize;
1325 }
1326 
1327 void
dhdpcie_bus_release_malloc(dhd_bus_t * bus,osl_t * osh)1328 dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh)
1329 {
1330 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1331 
1332 	if (bus->dhd && bus->dhd->dongle_reset)
1333 		return;
1334 
1335 	if (bus->vars && bus->varsz) {
1336 		MFREE(osh, bus->vars, bus->varsz);
1337 		bus->vars = NULL;
1338 	}
1339 
1340 	DHD_TRACE(("%s: Exit\n", __FUNCTION__));
1341 	return;
1342 
1343 }
1344 
1345 /** Stop bus module: clear pending frames, disable data flow */
dhd_bus_stop(struct dhd_bus * bus,bool enforce_mutex)1346 void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
1347 {
1348 	uint32 status;
1349 	unsigned long flags;
1350 
1351 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1352 
1353 	if (!bus->dhd)
1354 		return;
1355 
1356 	if (bus->dhd->busstate == DHD_BUS_DOWN) {
1357 		DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__));
1358 		goto done;
1359 	}
1360 
1361 	DHD_DISABLE_RUNTIME_PM(bus->dhd);
1362 
1363 	DHD_GENERAL_LOCK(bus->dhd, flags);
1364 	bus->dhd->busstate = DHD_BUS_DOWN;
1365 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
1366 
1367 	dhdpcie_bus_intr_disable(bus);
1368 	status =  dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
1369 	dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
1370 
1371 	if (!dhd_download_fw_on_driverload) {
1372 		dhd_dpc_kill(bus->dhd);
1373 	}
1374 
1375 	/* Clear rx control and wake any waiters */
1376 	dhd_os_set_ioctl_resp_timeout(IOCTL_DISABLE_TIMEOUT);
1377 	dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_BUS_STOP);
1378 
1379 done:
1380 	return;
1381 }
1382 
1383 #ifdef DEVICE_TX_STUCK_DETECT
1384 void
dhd_bus_send_msg_to_daemon(int reason)1385 dhd_bus_send_msg_to_daemon(int reason)
1386 {
1387 	bcm_to_info_t to_info;
1388 
1389 	to_info.magic = BCM_TO_MAGIC;
1390 	to_info.reason = reason;
1391 
1392 	dhd_send_msg_to_daemon(NULL, (void *)&to_info, sizeof(bcm_to_info_t));
1393 	return;
1394 }
1395 
1396 /**
1397  * scan the flow rings in active list to check if stuck and notify application
1398  * The conditions for warn/stuck detection are
1399  * 1. Flow ring is active
1400  * 2. There are packets to be consumed by the consumer (wr != rd)
1401  * If 1 and 2 are true, then
1402  * 3. Warn, if Tx completion is not received for a duration of DEVICE_TX_STUCK_WARN_DURATION
1403  * 4. Trap FW, if Tx completion is not received for a duration of DEVICE_TX_STUCK_DURATION
1404  */
1405 static void
dhd_bus_device_tx_stuck_scan(dhd_bus_t * bus)1406 dhd_bus_device_tx_stuck_scan(dhd_bus_t *bus)
1407 {
1408 	uint32 tx_cmpl;
1409 	unsigned long list_lock_flags;
1410 	unsigned long ring_lock_flags;
1411 	dll_t *item, *prev;
1412 	flow_ring_node_t *flow_ring_node;
1413 	bool ring_empty;
1414 	bool active;
1415 
1416 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, list_lock_flags);
1417 
1418 	for (item = dll_tail_p(&bus->flowring_active_list);
1419 			!dll_end(&bus->flowring_active_list, item); item = prev) {
1420 
1421 		prev = dll_prev_p(item);
1422 
1423 		flow_ring_node = dhd_constlist_to_flowring(item);
1424 		DHD_FLOWRING_LOCK(flow_ring_node->lock, ring_lock_flags);
1425 		tx_cmpl = flow_ring_node->tx_cmpl;
1426 		active = flow_ring_node->active;
1427 		ring_empty = dhd_prot_is_cmpl_ring_empty(bus->dhd, flow_ring_node->prot_info);
1428 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, ring_lock_flags);
1429 
1430 		if (ring_empty) {
1431 			/* reset conters... etc */
1432 			flow_ring_node->stuck_count = 0;
1433 			flow_ring_node->tx_cmpl_prev = tx_cmpl;
1434 			continue;
1435 		}
1436 		/**
1437 		 * DEVICE_TX_STUCK_WARN_DURATION, DEVICE_TX_STUCK_DURATION are integer
1438 		 * representation of time, to decide if a flow is in warn state or stuck.
1439 		 *
1440 		 * flow_ring_node->stuck_count is an integer counter representing how long
1441 		 * tx_cmpl is not received though there are pending packets in the ring
1442 		 * to be consumed by the dongle for that particular flow.
1443 		 *
1444 		 * This method of determining time elapsed is helpful in sleep/wake scenarios.
1445 		 * If host sleeps and wakes up, that sleep time is not considered into
1446 		 * stuck duration.
1447 		 */
1448 		if ((tx_cmpl == flow_ring_node->tx_cmpl_prev) && active) {
1449 
1450 			flow_ring_node->stuck_count++;
1451 
1452 			DHD_ERROR(("%s: flowid: %d tx_cmpl: %u tx_cmpl_prev: %u stuck_count: %d\n",
1453 				__func__, flow_ring_node->flowid, tx_cmpl,
1454 				flow_ring_node->tx_cmpl_prev, flow_ring_node->stuck_count));
1455 
1456 			switch (flow_ring_node->stuck_count) {
1457 				case DEVICE_TX_STUCK_WARN_DURATION:
1458 					/**
1459 					 * Notify Device Tx Stuck Notification App about the
1460 					 * device Tx stuck warning for this flowid.
1461 					 * App will collect the logs required.
1462 					 */
1463 					DHD_ERROR(("stuck warning for flowid: %d sent to app\n",
1464 						flow_ring_node->flowid));
1465 					dhd_bus_send_msg_to_daemon(REASON_DEVICE_TX_STUCK_WARNING);
1466 					break;
1467 				case DEVICE_TX_STUCK_DURATION:
1468 					/**
1469 					 * Notify Device Tx Stuck Notification App about the
1470 					 * device Tx stuck info for this flowid.
1471 					 * App will collect the logs required.
1472 					 */
1473 					DHD_ERROR(("stuck information for flowid: %d sent to app\n",
1474 						flow_ring_node->flowid));
1475 					dhd_bus_send_msg_to_daemon(REASON_DEVICE_TX_STUCK);
1476 					break;
1477 				default:
1478 					break;
1479 			}
1480 		} else {
1481 			flow_ring_node->tx_cmpl_prev = tx_cmpl;
1482 			flow_ring_node->stuck_count = 0;
1483 		}
1484 	}
1485 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, list_lock_flags);
1486 }
1487 /**
1488  * schedules dhd_bus_device_tx_stuck_scan after DEVICE_TX_STUCK_CKECK_TIMEOUT,
1489  * to determine if any flowid is stuck.
1490  */
1491 static void
dhd_bus_device_stuck_scan(dhd_bus_t * bus)1492 dhd_bus_device_stuck_scan(dhd_bus_t *bus)
1493 {
1494 	uint32 time_stamp; /* in millisec */
1495 	uint32 diff;
1496 
1497 	/* Need not run the algorith if Dongle has trapped */
1498 	if (bus->dhd->dongle_trap_occured) {
1499 		return;
1500 	}
1501 	time_stamp = OSL_SYSUPTIME();
1502 	diff = time_stamp - bus->device_tx_stuck_check;
1503 	if (diff > DEVICE_TX_STUCK_CKECK_TIMEOUT) {
1504 		dhd_bus_device_tx_stuck_scan(bus);
1505 		bus->device_tx_stuck_check = OSL_SYSUPTIME();
1506 	}
1507 	return;
1508 }
1509 #endif /* DEVICE_TX_STUCK_DETECT */
1510 
1511 /** Watchdog timer function */
dhd_bus_watchdog(dhd_pub_t * dhd)1512 bool dhd_bus_watchdog(dhd_pub_t *dhd)
1513 {
1514 	unsigned long flags;
1515 	dhd_bus_t *bus;
1516 	bus = dhd->bus;
1517 
1518 	DHD_GENERAL_LOCK(dhd, flags);
1519 	if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) ||
1520 			DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd)) {
1521 		DHD_GENERAL_UNLOCK(dhd, flags);
1522 		return FALSE;
1523 	}
1524 	DHD_BUS_BUSY_SET_IN_WD(dhd);
1525 	DHD_GENERAL_UNLOCK(dhd, flags);
1526 
1527 #ifdef DHD_PCIE_RUNTIMEPM
1528 	dhdpcie_runtime_bus_wake(dhd, TRUE, __builtin_return_address(0));
1529 #endif /* DHD_PCIE_RUNTIMEPM */
1530 
1531 
1532 
1533 	/* Poll for console output periodically */
1534 	if (dhd->busstate == DHD_BUS_DATA &&
1535 		dhd_console_ms != 0 && !bus->d3_suspend_pending) {
1536 		bus->console.count += dhd_watchdog_ms;
1537 		if (bus->console.count >= dhd_console_ms) {
1538 			bus->console.count -= dhd_console_ms;
1539 			/* Make sure backplane clock is on */
1540 			if (dhdpcie_bus_readconsole(bus) < 0)
1541 				dhd_console_ms = 0;	/* On error, stop trying */
1542 		}
1543 	}
1544 
1545 #ifdef DHD_READ_INTSTATUS_IN_DPC
1546 	if (bus->poll) {
1547 		bus->ipend = TRUE;
1548 		bus->dpc_sched = TRUE;
1549 		dhd_sched_dpc(bus->dhd);     /* queue DPC now!! */
1550 	}
1551 #endif /* DHD_READ_INTSTATUS_IN_DPC */
1552 
1553 #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
1554 	/* If haven't communicated with device for a while, deassert the Device_Wake GPIO */
1555 	if (dhd_doorbell_timeout != 0 && dhd->busstate == DHD_BUS_DATA &&
1556 		dhd->up && dhd_timeout_expired(&bus->doorbell_timer)) {
1557 		dhd_bus_set_device_wake(bus, FALSE);
1558 	}
1559 #endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
1560 #ifdef PCIE_INB_DW
1561 	if (INBAND_DW_ENAB(bus)) {
1562 		if (bus->ds_exit_timeout) {
1563 			bus->ds_exit_timeout --;
1564 			if (bus->ds_exit_timeout == 1) {
1565 				DHD_ERROR(("DS-EXIT TIMEOUT\n"));
1566 				bus->ds_exit_timeout = 0;
1567 				bus->inband_ds_exit_to_cnt++;
1568 			}
1569 		}
1570 		if (bus->host_sleep_exit_timeout) {
1571 			bus->host_sleep_exit_timeout --;
1572 			if (bus->host_sleep_exit_timeout == 1) {
1573 				DHD_ERROR(("HOST_SLEEP-EXIT TIMEOUT\n"));
1574 				bus->host_sleep_exit_timeout = 0;
1575 				bus->inband_host_sleep_exit_to_cnt++;
1576 			}
1577 		}
1578 	}
1579 #endif /* PCIE_INB_DW */
1580 
1581 #ifdef DEVICE_TX_STUCK_DETECT
1582 	if (dhd->bus->dev_tx_stuck_monitor == TRUE) {
1583 		dhd_bus_device_stuck_scan(dhd->bus);
1584 	}
1585 #endif /* DEVICE_TX_STUCK_DETECT */
1586 
1587 	DHD_GENERAL_LOCK(dhd, flags);
1588 	DHD_BUS_BUSY_CLEAR_IN_WD(dhd);
1589 	dhd_os_busbusy_wake(dhd);
1590 	DHD_GENERAL_UNLOCK(dhd, flags);
1591 	return TRUE;
1592 } /* dhd_bus_watchdog */
1593 
1594 
1595 uint16
dhd_get_chipid(dhd_pub_t * dhd)1596 dhd_get_chipid(dhd_pub_t *dhd)
1597 {
1598 	dhd_bus_t *bus = dhd->bus;
1599 
1600 	if (bus && bus->sih)
1601 		return (uint16)si_chipid(bus->sih);
1602 	else
1603 		return 0;
1604 }
1605 
1606 /* Download firmware image and nvram image */
1607 int
dhd_bus_download_firmware(struct dhd_bus * bus,osl_t * osh,char * pfw_path,char * pnv_path,char * pclm_path,char * pconf_path)1608 dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
1609                           char *pfw_path, char *pnv_path,
1610                           char *pclm_path, char *pconf_path)
1611 {
1612 	int ret;
1613 
1614 	bus->fw_path = pfw_path;
1615 	bus->nv_path = pnv_path;
1616 	bus->dhd->clm_path = pclm_path;
1617 	bus->dhd->conf_path = pconf_path;
1618 
1619 
1620 #if defined(DHD_BLOB_EXISTENCE_CHECK)
1621 	dhd_set_blob_support(bus->dhd, bus->fw_path);
1622 #endif /* DHD_BLOB_EXISTENCE_CHECK */
1623 
1624 	DHD_ERROR(("%s: firmware path=%s, nvram path=%s\n",
1625 		__FUNCTION__, bus->fw_path, bus->nv_path));
1626 
1627 	ret = dhdpcie_download_firmware(bus, osh);
1628 
1629 	return ret;
1630 }
1631 
1632 void
dhd_set_bus_params(struct dhd_bus * bus)1633 dhd_set_bus_params(struct dhd_bus *bus)
1634 {
1635 	if (bus->dhd->conf->dhd_poll >= 0) {
1636 		bus->poll = bus->dhd->conf->dhd_poll;
1637 		if (!bus->pollrate)
1638 			bus->pollrate = 1;
1639 		printf("%s: set polling mode %d\n", __FUNCTION__, bus->dhd->conf->dhd_poll);
1640 	}
1641 }
1642 
1643 static int
dhdpcie_download_firmware(struct dhd_bus * bus,osl_t * osh)1644 dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
1645 {
1646 	int ret = 0;
1647 #if defined(BCM_REQUEST_FW)
1648 	uint chipid = bus->sih->chip;
1649 	uint revid = bus->sih->chiprev;
1650 	char fw_path[64] = "/lib/firmware/brcm/bcm";	/* path to firmware image */
1651 	char nv_path[64];		/* path to nvram vars file */
1652 	bus->fw_path = fw_path;
1653 	bus->nv_path = nv_path;
1654 	switch (chipid) {
1655 	case BCM43570_CHIP_ID:
1656 		bcmstrncat(fw_path, "43570", 5);
1657 		switch (revid) {
1658 		case 0:
1659 			bcmstrncat(fw_path, "a0", 2);
1660 			break;
1661 		case 2:
1662 			bcmstrncat(fw_path, "a2", 2);
1663 			break;
1664 		default:
1665 			DHD_ERROR(("%s: revid is not found %x\n", __FUNCTION__,
1666 			revid));
1667 			break;
1668 		}
1669 		break;
1670 	default:
1671 		DHD_ERROR(("%s: unsupported device %x\n", __FUNCTION__,
1672 		chipid));
1673 		return 0;
1674 	}
1675 	/* load board specific nvram file */
1676 	snprintf(bus->nv_path, sizeof(nv_path), "%s.nvm", fw_path);
1677 	/* load firmware */
1678 	snprintf(bus->fw_path, sizeof(fw_path), "%s-firmware.bin", fw_path);
1679 #endif /* BCM_REQUEST_FW */
1680 
1681 	DHD_OS_WAKE_LOCK(bus->dhd);
1682 
1683 	dhd_conf_set_path_params(bus->dhd, bus->fw_path, bus->nv_path);
1684 	dhd_set_bus_params(bus);
1685 
1686 	ret = _dhdpcie_download_firmware(bus);
1687 
1688 	DHD_OS_WAKE_UNLOCK(bus->dhd);
1689 	return ret;
1690 }
1691 
1692 static int
dhdpcie_download_code_file(struct dhd_bus * bus,char * pfw_path)1693 dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
1694 {
1695 	int bcmerror = BCME_ERROR;
1696 	int offset = 0;
1697 	int len = 0;
1698 	bool store_reset;
1699 	char *imgbuf = NULL;
1700 	uint8 *memblock = NULL, *memptr;
1701 	uint8 *memptr_tmp = NULL; // terence: check downloaded firmware is correct
1702 
1703 	int offset_end = bus->ramsize;
1704 
1705 #ifndef DHD_EFI
1706 	DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
1707 #endif /* DHD_EFI */
1708 
1709 	/* Should succeed in opening image if it is actually given through registry
1710 	 * entry or in module param.
1711 	 */
1712 	imgbuf = dhd_os_open_image(pfw_path);
1713 	if (imgbuf == NULL) {
1714 		printf("%s: Open firmware file failed %s\n", __FUNCTION__, pfw_path);
1715 		goto err;
1716 	}
1717 
1718 	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
1719 	if (memblock == NULL) {
1720 		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
1721 		goto err;
1722 	}
1723 	if (dhd_msg_level & DHD_TRACE_VAL) {
1724 		memptr_tmp = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
1725 		if (memptr_tmp == NULL) {
1726 			DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
1727 			goto err;
1728 		}
1729 	}
1730 	if ((uint32)(uintptr)memblock % DHD_SDALIGN) {
1731 		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
1732 	}
1733 
1734 
1735 	/* check if CR4/CA7 */
1736 	store_reset = (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
1737 			si_setcore(bus->sih, ARMCA7_CORE_ID, 0));
1738 
1739 	/* Download image with MEMBLOCK size */
1740 	while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, imgbuf))) {
1741 		if (len < 0) {
1742 			DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
1743 			bcmerror = BCME_ERROR;
1744 			goto err;
1745 		}
1746 		/* if address is 0, store the reset instruction to be written in 0 */
1747 		if (store_reset) {
1748 			ASSERT(offset == 0);
1749 			bus->resetinstr = *(((uint32*)memptr));
1750 			/* Add start of RAM address to the address given by user */
1751 			offset += bus->dongle_ram_base;
1752 			offset_end += offset;
1753 			store_reset = FALSE;
1754 		}
1755 
1756 		bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
1757 		if (bcmerror) {
1758 			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
1759 				__FUNCTION__, bcmerror, MEMBLOCK, offset));
1760 			goto err;
1761 		}
1762 
1763 		if (dhd_msg_level & DHD_TRACE_VAL) {
1764 			bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset, memptr_tmp, len);
1765 			if (bcmerror) {
1766 				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
1767 				        __FUNCTION__, bcmerror, MEMBLOCK, offset));
1768 				goto err;
1769 			}
1770 			if (memcmp(memptr_tmp, memptr, len)) {
1771 				DHD_ERROR(("%s: Downloaded image is corrupted.\n", __FUNCTION__));
1772 				goto err;
1773 			} else
1774 				DHD_INFO(("%s: Download, Upload and compare succeeded.\n", __FUNCTION__));
1775 		}
1776 		offset += MEMBLOCK;
1777 
1778 		if (offset >= offset_end) {
1779 			DHD_ERROR(("%s: invalid address access to %x (offset end: %x)\n",
1780 				__FUNCTION__, offset, offset_end));
1781 			bcmerror = BCME_ERROR;
1782 			goto err;
1783 		}
1784 	}
1785 
1786 err:
1787 	if (memblock) {
1788 		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
1789 		if (dhd_msg_level & DHD_TRACE_VAL) {
1790 			if (memptr_tmp)
1791 				MFREE(bus->dhd->osh, memptr_tmp, MEMBLOCK + DHD_SDALIGN);
1792 		}
1793 	}
1794 
1795 	if (imgbuf) {
1796 		dhd_os_close_image(imgbuf);
1797 	}
1798 
1799 	return bcmerror;
1800 } /* dhdpcie_download_code_file */
1801 
1802 #ifdef CUSTOMER_HW4_DEBUG
1803 #define MIN_NVRAMVARS_SIZE 128
1804 #endif /* CUSTOMER_HW4_DEBUG */
1805 
1806 static int
dhdpcie_download_nvram(struct dhd_bus * bus)1807 dhdpcie_download_nvram(struct dhd_bus *bus)
1808 {
1809 	int bcmerror = BCME_ERROR;
1810 	uint len;
1811 	char * memblock = NULL;
1812 	char *bufp;
1813 	char *pnv_path;
1814 	bool nvram_file_exists;
1815 	bool nvram_uefi_exists = FALSE;
1816 	bool local_alloc = FALSE;
1817 	pnv_path = bus->nv_path;
1818 
1819 #ifdef BCMEMBEDIMAGE
1820 	nvram_file_exists = TRUE;
1821 #else
1822 	nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
1823 #endif
1824 
1825 	/* First try UEFI */
1826 	len = MAX_NVRAMBUF_SIZE;
1827 	dhd_get_download_buffer(bus->dhd, NULL, NVRAM, &memblock, (int *)&len);
1828 
1829 	/* If UEFI empty, then read from file system */
1830 	if ((len <= 0) || (memblock == NULL)) {
1831 
1832 		if (nvram_file_exists) {
1833 			len = MAX_NVRAMBUF_SIZE;
1834 			dhd_get_download_buffer(bus->dhd, pnv_path, NVRAM, &memblock, (int *)&len);
1835 			if ((len <= 0 || len > MAX_NVRAMBUF_SIZE)) {
1836 				goto err;
1837 			}
1838 		}
1839 		else {
1840 			/* For SROM OTP no external file or UEFI required */
1841 			bcmerror = BCME_OK;
1842 		}
1843 	} else {
1844 		nvram_uefi_exists = TRUE;
1845 	}
1846 
1847 	DHD_ERROR(("%s: dhd_get_download_buffer len %d\n", __FUNCTION__, len));
1848 
1849 	if (len > 0 && len <= MAX_NVRAMBUF_SIZE && memblock != NULL) {
1850 		bufp = (char *) memblock;
1851 
1852 #ifdef CACHE_FW_IMAGES
1853 		if (bus->processed_nvram_params_len) {
1854 			len = bus->processed_nvram_params_len;
1855 		}
1856 
1857 		if (!bus->processed_nvram_params_len) {
1858 			bufp[len] = 0;
1859 			if (nvram_uefi_exists || nvram_file_exists) {
1860 				len = process_nvram_vars(bufp, len);
1861 				bus->processed_nvram_params_len = len;
1862 			}
1863 		} else
1864 #else
1865 		{
1866 			bufp[len] = 0;
1867 			if (nvram_uefi_exists || nvram_file_exists) {
1868 				len = process_nvram_vars(bufp, len);
1869 			}
1870 		}
1871 #endif /* CACHE_FW_IMAGES */
1872 
1873 		DHD_ERROR(("%s: process_nvram_vars len %d\n", __FUNCTION__, len));
1874 #ifdef CUSTOMER_HW4_DEBUG
1875 		if (len < MIN_NVRAMVARS_SIZE) {
1876 			DHD_ERROR(("%s: invalid nvram size in process_nvram_vars \n",
1877 				__FUNCTION__));
1878 			bcmerror = BCME_ERROR;
1879 			goto err;
1880 		}
1881 #endif /* CUSTOMER_HW4_DEBUG */
1882 
1883 		if (len % 4) {
1884 			len += 4 - (len % 4);
1885 		}
1886 		bufp += len;
1887 		*bufp++ = 0;
1888 		if (len)
1889 			bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
1890 		if (bcmerror) {
1891 			DHD_ERROR(("%s: error downloading vars: %d\n",
1892 				__FUNCTION__, bcmerror));
1893 		}
1894 	}
1895 
1896 
1897 err:
1898 	if (memblock) {
1899 		if (local_alloc) {
1900 			MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
1901 		} else {
1902 			dhd_free_download_buffer(bus->dhd, memblock, MAX_NVRAMBUF_SIZE);
1903 		}
1904 	}
1905 
1906 	return bcmerror;
1907 }
1908 
1909 
1910 #ifdef BCMEMBEDIMAGE
1911 int
dhdpcie_download_code_array(struct dhd_bus * bus)1912 dhdpcie_download_code_array(struct dhd_bus *bus)
1913 {
1914 	int bcmerror = -1;
1915 	int offset = 0;
1916 	unsigned char *p_dlarray  = NULL;
1917 	unsigned int dlarray_size = 0;
1918 	unsigned int downloded_len, remaining_len, len;
1919 	char *p_dlimagename, *p_dlimagever, *p_dlimagedate;
1920 	uint8 *memblock = NULL, *memptr;
1921 
1922 	downloded_len = 0;
1923 	remaining_len = 0;
1924 	len = 0;
1925 
1926 #ifdef DHD_EFI
1927 	p_dlarray = rtecdc_fw_arr;
1928 	dlarray_size = sizeof(rtecdc_fw_arr);
1929 #else
1930 	p_dlarray = dlarray;
1931 	dlarray_size = sizeof(dlarray);
1932 	p_dlimagename = dlimagename;
1933 	p_dlimagever  = dlimagever;
1934 	p_dlimagedate = dlimagedate;
1935 #endif /* DHD_EFI */
1936 
1937 #ifndef DHD_EFI
1938 	if ((p_dlarray == 0) ||	(dlarray_size == 0) ||(dlarray_size > bus->ramsize) ||
1939 		(p_dlimagename == 0) ||	(p_dlimagever  == 0) ||	(p_dlimagedate == 0))
1940 		goto err;
1941 #endif /* DHD_EFI */
1942 
1943 	memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
1944 	if (memblock == NULL) {
1945 		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
1946 		goto err;
1947 	}
1948 	if ((uint32)(uintptr)memblock % DHD_SDALIGN)
1949 		memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
1950 
1951 	while (downloded_len  < dlarray_size) {
1952 		remaining_len = dlarray_size - downloded_len;
1953 		if (remaining_len >= MEMBLOCK)
1954 			len = MEMBLOCK;
1955 		else
1956 			len = remaining_len;
1957 
1958 		memcpy(memptr, (p_dlarray + downloded_len), len);
1959 		/* check if CR4/CA7 */
1960 		if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0) ||
1961 			si_setcore(bus->sih, SYSMEM_CORE_ID, 0)) {
1962 			/* if address is 0, store the reset instruction to be written in 0 */
1963 			if (offset == 0) {
1964 				bus->resetinstr = *(((uint32*)memptr));
1965 				/* Add start of RAM address to the address given by user */
1966 				offset += bus->dongle_ram_base;
1967 			}
1968 		}
1969 		bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
1970 		downloded_len += len;
1971 		if (bcmerror) {
1972 			DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
1973 				__FUNCTION__, bcmerror, MEMBLOCK, offset));
1974 			goto err;
1975 		}
1976 		offset += MEMBLOCK;
1977 	}
1978 
1979 #ifdef DHD_DEBUG
1980 	/* Upload and compare the downloaded code */
1981 	{
1982 		unsigned char *ularray = NULL;
1983 		unsigned int uploded_len;
1984 		uploded_len = 0;
1985 		bcmerror = -1;
1986 		ularray = MALLOC(bus->dhd->osh, dlarray_size);
1987 		if (ularray == NULL)
1988 			goto upload_err;
1989 		/* Upload image to verify downloaded contents. */
1990 		offset = bus->dongle_ram_base;
1991 		memset(ularray, 0xaa, dlarray_size);
1992 		while (uploded_len  < dlarray_size) {
1993 			remaining_len = dlarray_size - uploded_len;
1994 			if (remaining_len >= MEMBLOCK)
1995 				len = MEMBLOCK;
1996 			else
1997 				len = remaining_len;
1998 			bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset,
1999 				(uint8 *)(ularray + uploded_len), len);
2000 			if (bcmerror) {
2001 				DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
2002 					__FUNCTION__, bcmerror, MEMBLOCK, offset));
2003 				goto upload_err;
2004 			}
2005 
2006 			uploded_len += len;
2007 			offset += MEMBLOCK;
2008 		}
2009 #ifdef DHD_EFI
2010 		if (memcmp(p_dlarray, ularray, dlarray_size)) {
2011 			DHD_ERROR(("%s: Downloaded image is corrupted ! \n", __FUNCTION__));
2012 			goto upload_err;
2013 		} else
2014 			DHD_ERROR(("%s: Download, Upload and compare succeeded .\n", __FUNCTION__));
2015 #else
2016 		if (memcmp(p_dlarray, ularray, dlarray_size)) {
2017 			DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
2018 				__FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
2019 			goto upload_err;
2020 
2021 		} else
2022 			DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
2023 				__FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
2024 #endif /* DHD_EFI */
2025 
2026 upload_err:
2027 		if (ularray)
2028 			MFREE(bus->dhd->osh, ularray, dlarray_size);
2029 	}
2030 #endif /* DHD_DEBUG */
2031 err:
2032 
2033 	if (memblock)
2034 		MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
2035 
2036 	return bcmerror;
2037 } /* dhdpcie_download_code_array */
2038 #endif /* BCMEMBEDIMAGE */
2039 
2040 
2041 static int
dhdpcie_ramsize_read_image(struct dhd_bus * bus,char * buf,int len)2042 dhdpcie_ramsize_read_image(struct dhd_bus *bus, char *buf, int len)
2043 {
2044 	int bcmerror = BCME_ERROR;
2045 	char *imgbuf = NULL;
2046 
2047 	if (buf == NULL || len == 0)
2048 		goto err;
2049 
2050 	/* External image takes precedence if specified */
2051 	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
2052 		imgbuf = dhd_os_open_image(bus->fw_path);
2053 		if (imgbuf == NULL) {
2054 			DHD_ERROR(("%s: Failed to open firmware file\n", __FUNCTION__));
2055 			goto err;
2056 		}
2057 
2058 		/* Read it */
2059 		if (len != dhd_os_get_image_block(buf, len, imgbuf)) {
2060 			DHD_ERROR(("%s: Failed to read %d bytes data\n", __FUNCTION__, len));
2061 			goto err;
2062 		}
2063 
2064 		bcmerror = BCME_OK;
2065 	}
2066 
2067 err:
2068 	if (imgbuf)
2069 		dhd_os_close_image(imgbuf);
2070 
2071 	return bcmerror;
2072 }
2073 
2074 
2075 /* The ramsize can be changed in the dongle image, for example 4365 chip share the sysmem
2076  * with BMC and we can adjust how many sysmem belong to CA7 during dongle compilation.
2077  * So in DHD we need to detect this case and update the correct dongle RAMSIZE as well.
2078  */
2079 static void
dhdpcie_ramsize_adj(struct dhd_bus * bus)2080 dhdpcie_ramsize_adj(struct dhd_bus *bus)
2081 {
2082 	int i, search_len = 0;
2083 	uint8 *memptr = NULL;
2084 	uint8 *ramsizeptr = NULL;
2085 	uint ramsizelen;
2086 	uint32 ramsize_ptr_ptr[] = {RAMSIZE_PTR_PTR_LIST};
2087 	hnd_ramsize_ptr_t ramsize_info;
2088 
2089 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2090 
2091 	/* Adjust dongle RAMSIZE already called. */
2092 	if (bus->ramsize_adjusted) {
2093 		return;
2094 	}
2095 
2096 	/* success or failure,  we don't want to be here
2097 	 * more than once.
2098 	 */
2099 	bus->ramsize_adjusted = TRUE;
2100 
2101 	/* Not handle if user restrict dongle ram size enabled */
2102 	if (dhd_dongle_memsize) {
2103 		DHD_ERROR(("%s: user restrict dongle ram size to %d.\n", __FUNCTION__,
2104 			dhd_dongle_memsize));
2105 		return;
2106 	}
2107 
2108 #ifndef BCMEMBEDIMAGE
2109 	/* Out immediately if no image to download */
2110 	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
2111 		DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__));
2112 		return;
2113 	}
2114 #endif /* !BCMEMBEDIMAGE */
2115 
2116 	/* Get maximum RAMSIZE info search length */
2117 	for (i = 0; ; i++) {
2118 		if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END)
2119 			break;
2120 
2121 		if (search_len < (int)ramsize_ptr_ptr[i])
2122 			search_len = (int)ramsize_ptr_ptr[i];
2123 	}
2124 
2125 	if (!search_len)
2126 		return;
2127 
2128 	search_len += sizeof(hnd_ramsize_ptr_t);
2129 
2130 	memptr = MALLOC(bus->dhd->osh, search_len);
2131 	if (memptr == NULL) {
2132 		DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, search_len));
2133 		return;
2134 	}
2135 
2136 	/* External image takes precedence if specified */
2137 	if (dhdpcie_ramsize_read_image(bus, (char *)memptr, search_len) != BCME_OK) {
2138 #if defined(BCMEMBEDIMAGE) && !defined(DHD_EFI)
2139 		unsigned char *p_dlarray  = NULL;
2140 		unsigned int dlarray_size = 0;
2141 		char *p_dlimagename, *p_dlimagever, *p_dlimagedate;
2142 
2143 		p_dlarray = dlarray;
2144 		dlarray_size = sizeof(dlarray);
2145 		p_dlimagename = dlimagename;
2146 		p_dlimagever  = dlimagever;
2147 		p_dlimagedate = dlimagedate;
2148 
2149 		if ((p_dlarray == 0) ||	(dlarray_size == 0) || (p_dlimagename == 0) ||
2150 			(p_dlimagever  == 0) ||	(p_dlimagedate == 0))
2151 			goto err;
2152 
2153 		ramsizeptr = p_dlarray;
2154 		ramsizelen = dlarray_size;
2155 #else
2156 		goto err;
2157 #endif /* BCMEMBEDIMAGE && !DHD_EFI */
2158 	}
2159 	else {
2160 		ramsizeptr = memptr;
2161 		ramsizelen = search_len;
2162 	}
2163 
2164 	if (ramsizeptr) {
2165 		/* Check Magic */
2166 		for (i = 0; ; i++) {
2167 			if (ramsize_ptr_ptr[i] == RAMSIZE_PTR_PTR_END)
2168 				break;
2169 
2170 			if (ramsize_ptr_ptr[i] + sizeof(hnd_ramsize_ptr_t) > ramsizelen)
2171 				continue;
2172 
2173 			memcpy((char *)&ramsize_info, ramsizeptr + ramsize_ptr_ptr[i],
2174 				sizeof(hnd_ramsize_ptr_t));
2175 
2176 			if (ramsize_info.magic == HTOL32(HND_RAMSIZE_PTR_MAGIC)) {
2177 				bus->orig_ramsize = LTOH32(ramsize_info.ram_size);
2178 				bus->ramsize = LTOH32(ramsize_info.ram_size);
2179 				DHD_ERROR(("%s: Adjust dongle RAMSIZE to 0x%x\n", __FUNCTION__,
2180 					bus->ramsize));
2181 				break;
2182 			}
2183 		}
2184 	}
2185 
2186 err:
2187 	if (memptr)
2188 		MFREE(bus->dhd->osh, memptr, search_len);
2189 
2190 	return;
2191 } /* _dhdpcie_download_firmware */
2192 
2193 static int
_dhdpcie_download_firmware(struct dhd_bus * bus)2194 _dhdpcie_download_firmware(struct dhd_bus *bus)
2195 {
2196 	int bcmerror = -1;
2197 
2198 	bool embed = FALSE;	/* download embedded firmware */
2199 	bool dlok = FALSE;	/* download firmware succeeded */
2200 
2201 	/* Out immediately if no image to download */
2202 	if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
2203 #ifdef BCMEMBEDIMAGE
2204 		embed = TRUE;
2205 #else
2206 		DHD_ERROR(("%s: no fimrware file\n", __FUNCTION__));
2207 		return 0;
2208 #endif
2209 	}
2210 	/* Adjust ram size */
2211 	dhdpcie_ramsize_adj(bus);
2212 
2213 	/* Keep arm in reset */
2214 	if (dhdpcie_bus_download_state(bus, TRUE)) {
2215 		DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
2216 		goto err;
2217 	}
2218 
2219 	/* External image takes precedence if specified */
2220 	if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
2221 		if (dhdpcie_download_code_file(bus, bus->fw_path)) {
2222 			DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
2223 #ifdef BCMEMBEDIMAGE
2224 			embed = TRUE;
2225 #else
2226 			goto err;
2227 #endif
2228 		} else {
2229 			embed = FALSE;
2230 			dlok = TRUE;
2231 		}
2232 	}
2233 
2234 #ifdef BCMEMBEDIMAGE
2235 	if (embed) {
2236 		if (dhdpcie_download_code_array(bus)) {
2237 			DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
2238 			goto err;
2239 		} else {
2240 			dlok = TRUE;
2241 		}
2242 	}
2243 #else
2244 	BCM_REFERENCE(embed);
2245 #endif
2246 	if (!dlok) {
2247 		DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
2248 		goto err;
2249 	}
2250 
2251 	/* EXAMPLE: nvram_array */
2252 	/* If a valid nvram_arry is specified as above, it can be passed down to dongle */
2253 	/* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
2254 
2255 
2256 	/* External nvram takes precedence if specified */
2257 	if (dhdpcie_download_nvram(bus)) {
2258 		DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
2259 		goto err;
2260 	}
2261 
2262 	/* Take arm out of reset */
2263 	if (dhdpcie_bus_download_state(bus, FALSE)) {
2264 		DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
2265 		goto err;
2266 	}
2267 
2268 	bcmerror = 0;
2269 
2270 err:
2271 	return bcmerror;
2272 } /* _dhdpcie_download_firmware */
2273 
2274 #define CONSOLE_LINE_MAX	192
2275 
2276 static int
dhdpcie_bus_readconsole(dhd_bus_t * bus)2277 dhdpcie_bus_readconsole(dhd_bus_t *bus)
2278 {
2279 	dhd_console_t *c = &bus->console;
2280 	uint8 line[CONSOLE_LINE_MAX], ch;
2281 	uint32 n, idx, addr;
2282 	int rv;
2283 
2284 	/* Don't do anything until FWREADY updates console address */
2285 	if (bus->console_addr == 0)
2286 		return -1;
2287 
2288 	/* Read console log struct */
2289 	addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
2290 
2291 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
2292 		return rv;
2293 
2294 	/* Allocate console buffer (one time only) */
2295 	if (c->buf == NULL) {
2296 		c->bufsize = ltoh32(c->log.buf_size);
2297 		if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
2298 			return BCME_NOMEM;
2299 	}
2300 	idx = ltoh32(c->log.idx);
2301 
2302 	/* Protect against corrupt value */
2303 	if (idx > c->bufsize)
2304 		return BCME_ERROR;
2305 
2306 	/* Skip reading the console buffer if the index pointer has not moved */
2307 	if (idx == c->last)
2308 		return BCME_OK;
2309 
2310 	/* Read the console buffer */
2311 	addr = ltoh32(c->log.buf);
2312 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
2313 		return rv;
2314 
2315 	while (c->last != idx) {
2316 		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
2317 			if (c->last == idx) {
2318 				/* This would output a partial line.  Instead, back up
2319 				 * the buffer pointer and output this line next time around.
2320 				 */
2321 				if (c->last >= n)
2322 					c->last -= n;
2323 				else
2324 					c->last = c->bufsize - n;
2325 				goto break2;
2326 			}
2327 			ch = c->buf[c->last];
2328 			c->last = (c->last + 1) % c->bufsize;
2329 			if (ch == '\n')
2330 				break;
2331 			line[n] = ch;
2332 		}
2333 
2334 		if (n > 0) {
2335 			if (line[n - 1] == '\r')
2336 				n--;
2337 			line[n] = 0;
2338 			DHD_FWLOG(("CONSOLE: %s\n", line));
2339 		}
2340 	}
2341 break2:
2342 
2343 	return BCME_OK;
2344 } /* dhdpcie_bus_readconsole */
2345 
2346 void
dhd_bus_dump_console_buffer(dhd_bus_t * bus)2347 dhd_bus_dump_console_buffer(dhd_bus_t *bus)
2348 {
2349 	uint32 n, i;
2350 	uint32 addr;
2351 	char *console_buffer = NULL;
2352 	uint32 console_ptr, console_size, console_index;
2353 	uint8 line[CONSOLE_LINE_MAX], ch;
2354 	int rv;
2355 
2356 	DHD_ERROR(("%s: Dump Complete Console Buffer\n", __FUNCTION__));
2357 
2358 	if (bus->is_linkdown) {
2359 		DHD_ERROR(("%s: Skip dump Console Buffer due to PCIe link down\n", __FUNCTION__));
2360 		return;
2361 	}
2362 
2363 	addr =	bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
2364 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
2365 		(uint8 *)&console_ptr, sizeof(console_ptr))) < 0) {
2366 		goto exit;
2367 	}
2368 
2369 	addr =	bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
2370 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
2371 		(uint8 *)&console_size, sizeof(console_size))) < 0) {
2372 		goto exit;
2373 	}
2374 
2375 	addr =	bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
2376 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
2377 		(uint8 *)&console_index, sizeof(console_index))) < 0) {
2378 		goto exit;
2379 	}
2380 
2381 	console_ptr = ltoh32(console_ptr);
2382 	console_size = ltoh32(console_size);
2383 	console_index = ltoh32(console_index);
2384 
2385 	if (console_size > CONSOLE_BUFFER_MAX ||
2386 		!(console_buffer = MALLOC(bus->dhd->osh, console_size))) {
2387 		goto exit;
2388 	}
2389 
2390 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
2391 		(uint8 *)console_buffer, console_size)) < 0) {
2392 		goto exit;
2393 	}
2394 
2395 	for (i = 0, n = 0; i < console_size; i += n + 1) {
2396 		for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
2397 			ch = console_buffer[(console_index + i + n) % console_size];
2398 			if (ch == '\n')
2399 				break;
2400 			line[n] = ch;
2401 		}
2402 
2403 
2404 		if (n > 0) {
2405 			if (line[n - 1] == '\r')
2406 				n--;
2407 			line[n] = 0;
2408 			/* Don't use DHD_ERROR macro since we print
2409 			 * a lot of information quickly. The macro
2410 			 * will truncate a lot of the printfs
2411 			 */
2412 
2413 			DHD_FWLOG(("CONSOLE: %s\n", line));
2414 		}
2415 	}
2416 
2417 exit:
2418 	if (console_buffer)
2419 		MFREE(bus->dhd->osh, console_buffer, console_size);
2420 	return;
2421 }
2422 
2423 static int
dhdpcie_checkdied(dhd_bus_t * bus,char * data,uint size)2424 dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
2425 {
2426 	int bcmerror = 0;
2427 	uint msize = 512;
2428 	char *mbuffer = NULL;
2429 	uint maxstrlen = 256;
2430 	char *str = NULL;
2431 	pciedev_shared_t *local_pciedev_shared = bus->pcie_sh;
2432 	struct bcmstrbuf strbuf;
2433 	unsigned long flags;
2434 
2435 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2436 
2437 	if (DHD_NOCHECKDIED_ON()) {
2438 		return 0;
2439 	}
2440 
2441 	if (data == NULL) {
2442 		/*
2443 		 * Called after a rx ctrl timeout. "data" is NULL.
2444 		 * allocate memory to trace the trap or assert.
2445 		 */
2446 		size = msize;
2447 		mbuffer = data = MALLOC(bus->dhd->osh, msize);
2448 
2449 		if (mbuffer == NULL) {
2450 			DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
2451 			bcmerror = BCME_NOMEM;
2452 			goto done;
2453 		}
2454 	}
2455 
2456 	if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
2457 		DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
2458 		bcmerror = BCME_NOMEM;
2459 		goto done;
2460 	}
2461 	DHD_GENERAL_LOCK(bus->dhd, flags);
2462 	DHD_BUS_BUSY_SET_IN_CHECKDIED(bus->dhd);
2463 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
2464 
2465 	if ((bcmerror = dhdpcie_readshared(bus)) < 0) {
2466 		goto done;
2467 	}
2468 
2469 	bcm_binit(&strbuf, data, size);
2470 
2471 	bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address  : 0x%08X\n",
2472 	            local_pciedev_shared->msgtrace_addr, local_pciedev_shared->console_addr);
2473 
2474 	if ((local_pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
2475 		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
2476 		 * (Avoids conflict with real asserts for programmatic parsing of output.)
2477 		 */
2478 		bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
2479 	}
2480 
2481 	if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) {
2482 		/* NOTE: Misspelled assert is intentional - DO NOT FIX.
2483 		 * (Avoids conflict with real asserts for programmatic parsing of output.)
2484 		 */
2485 		bcm_bprintf(&strbuf, "No trap%s in dongle",
2486 		          (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT)
2487 		          ?"/assrt" :"");
2488 	} else {
2489 		if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) {
2490 			/* Download assert */
2491 			bcm_bprintf(&strbuf, "Dongle assert");
2492 			if (bus->pcie_sh->assert_exp_addr != 0) {
2493 				str[0] = '\0';
2494 				if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
2495 					bus->pcie_sh->assert_exp_addr,
2496 					(uint8 *)str, maxstrlen)) < 0) {
2497 					goto done;
2498 				}
2499 
2500 				str[maxstrlen - 1] = '\0';
2501 				bcm_bprintf(&strbuf, " expr \"%s\"", str);
2502 			}
2503 
2504 			if (bus->pcie_sh->assert_file_addr != 0) {
2505 				str[0] = '\0';
2506 				if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
2507 					bus->pcie_sh->assert_file_addr,
2508 					(uint8 *)str, maxstrlen)) < 0) {
2509 					goto done;
2510 				}
2511 
2512 				str[maxstrlen - 1] = '\0';
2513 				bcm_bprintf(&strbuf, " file \"%s\"", str);
2514 			}
2515 
2516 			bcm_bprintf(&strbuf, " line %d ",  bus->pcie_sh->assert_line);
2517 		}
2518 
2519 		if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
2520 			trap_t *tr = &bus->dhd->last_trap_info;
2521 			bus->dhd->dongle_trap_occured = TRUE;
2522 			if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
2523 				bus->pcie_sh->trap_addr, (uint8*)tr, sizeof(trap_t))) < 0) {
2524 				goto done;
2525 			}
2526 			dhd_bus_dump_trap_info(bus, &strbuf);
2527 
2528 			dhd_bus_dump_console_buffer(bus);
2529 		}
2530 	}
2531 
2532 	if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
2533 		printf("%s: %s\n", __FUNCTION__, strbuf.origbuf);
2534 #ifdef REPORT_FATAL_TIMEOUTS
2535 		/**
2536 		 * stop the timers as FW trapped
2537 		 */
2538 		if (dhd_stop_scan_timer(bus->dhd)) {
2539 			DHD_ERROR(("dhd_stop_scan_timer failed\n"));
2540 			ASSERT(0);
2541 		}
2542 		if (dhd_stop_bus_timer(bus->dhd)) {
2543 			DHD_ERROR(("dhd_stop_bus_timer failed\n"));
2544 			ASSERT(0);
2545 		}
2546 		if (dhd_stop_cmd_timer(bus->dhd)) {
2547 			DHD_ERROR(("dhd_stop_cmd_timer failed\n"));
2548 			ASSERT(0);
2549 		}
2550 		if (dhd_stop_join_timer(bus->dhd)) {
2551 			DHD_ERROR(("dhd_stop_join_timer failed\n"));
2552 			ASSERT(0);
2553 		}
2554 #endif /* REPORT_FATAL_TIMEOUTS */
2555 
2556 		dhd_prot_debug_info_print(bus->dhd);
2557 
2558 #if defined(DHD_FW_COREDUMP)
2559 		/* save core dump or write to a file */
2560 		if (bus->dhd->memdump_enabled) {
2561 			bus->dhd->memdump_type = DUMP_TYPE_DONGLE_TRAP;
2562 			dhdpcie_mem_dump(bus);
2563 		}
2564 #endif /* DHD_FW_COREDUMP */
2565 
2566 		/* wake up IOCTL wait event */
2567 		dhd_wakeup_ioctl_event(bus->dhd, IOCTL_RETURN_ON_TRAP);
2568 
2569 		dhd_schedule_reset(bus->dhd);
2570 
2571 
2572 	}
2573 
2574 	DHD_GENERAL_LOCK(bus->dhd, flags);
2575 	DHD_BUS_BUSY_CLEAR_IN_CHECKDIED(bus->dhd);
2576 	dhd_os_busbusy_wake(bus->dhd);
2577 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
2578 
2579 done:
2580 	if (mbuffer)
2581 		MFREE(bus->dhd->osh, mbuffer, msize);
2582 	if (str)
2583 		MFREE(bus->dhd->osh, str, maxstrlen);
2584 
2585 	return bcmerror;
2586 } /* dhdpcie_checkdied */
2587 
2588 
2589 /* Custom copy of dhdpcie_mem_dump() that can be called at interrupt level */
dhdpcie_mem_dump_bugcheck(dhd_bus_t * bus,uint8 * buf)2590 void dhdpcie_mem_dump_bugcheck(dhd_bus_t *bus, uint8 *buf)
2591 {
2592 	int ret = 0;
2593 	int size; /* Full mem size */
2594 	int start; /* Start address */
2595 	int read_size = 0; /* Read size of each iteration */
2596 	uint8 *databuf = buf;
2597 
2598 	if (bus == NULL) {
2599 		return;
2600 	}
2601 
2602 	start = bus->dongle_ram_base;
2603 	read_size = 4;
2604 	/* check for dead bus */
2605 	{
2606 		uint test_word = 0;
2607 		ret = dhdpcie_bus_membytes(bus, FALSE, start, (uint8*)&test_word, read_size);
2608 		/* if read error or bus timeout */
2609 		if (ret || (test_word == 0xFFFFFFFF)) {
2610 			return;
2611 		}
2612 	}
2613 
2614 	/* Get full mem size */
2615 	size = bus->ramsize;
2616 	/* Read mem content */
2617 	while (size)
2618 	{
2619 		read_size = MIN(MEMBLOCK, size);
2620 		if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size))) {
2621 			return;
2622 		}
2623 
2624 		/* Decrement size and increment start address */
2625 		size -= read_size;
2626 		start += read_size;
2627 		databuf += read_size;
2628 	}
2629 	bus->dhd->soc_ram = buf;
2630 	bus->dhd->soc_ram_length = bus->ramsize;
2631 	return;
2632 }
2633 
2634 
2635 #if defined(DHD_FW_COREDUMP)
2636 static int
dhdpcie_mem_dump(dhd_bus_t * bus)2637 dhdpcie_mem_dump(dhd_bus_t *bus)
2638 {
2639 	int ret = 0;
2640 	int size; /* Full mem size */
2641 	int start = bus->dongle_ram_base; /* Start address */
2642 	int read_size = 0; /* Read size of each iteration */
2643 	uint8 *buf = NULL, *databuf = NULL;
2644 
2645 #ifdef EXYNOS_PCIE_DEBUG
2646 	exynos_pcie_register_dump(1);
2647 #endif /* EXYNOS_PCIE_DEBUG */
2648 
2649 #ifdef SUPPORT_LINKDOWN_RECOVERY
2650 	if (bus->is_linkdown) {
2651 		DHD_ERROR(("%s: PCIe link is down so skip\n", __FUNCTION__));
2652 		return BCME_ERROR;
2653 	}
2654 #endif /* SUPPORT_LINKDOWN_RECOVERY */
2655 
2656 	/* Get full mem size */
2657 	size = bus->ramsize;
2658 	buf = dhd_get_fwdump_buf(bus->dhd, size);
2659 	if (!buf) {
2660 		DHD_ERROR(("%s: Out of memory (%d bytes)\n", __FUNCTION__, size));
2661 		return BCME_ERROR;
2662 	}
2663 
2664 	/* Read mem content */
2665 	DHD_TRACE_HW4(("Dump dongle memory\n"));
2666 	databuf = buf;
2667 	while (size)
2668 	{
2669 		read_size = MIN(MEMBLOCK, size);
2670 		if ((ret = dhdpcie_bus_membytes(bus, FALSE, start, databuf, read_size)))
2671 		{
2672 			DHD_ERROR(("%s: Error membytes %d\n", __FUNCTION__, ret));
2673 			bus->dhd->memdump_success = FALSE;
2674 			return BCME_ERROR;
2675 		}
2676 		DHD_TRACE(("."));
2677 
2678 		/* Decrement size and increment start address */
2679 		size -= read_size;
2680 		start += read_size;
2681 		databuf += read_size;
2682 	}
2683 	bus->dhd->memdump_success = TRUE;
2684 
2685 	dhd_schedule_memdump(bus->dhd, buf, bus->ramsize);
2686 	/* buf, actually soc_ram free handled in dhd_{free,clear} */
2687 
2688 	return ret;
2689 }
2690 
2691 int
dhd_bus_mem_dump(dhd_pub_t * dhdp)2692 dhd_bus_mem_dump(dhd_pub_t *dhdp)
2693 {
2694 	dhd_bus_t *bus = dhdp->bus;
2695 
2696 	if (dhdp->busstate == DHD_BUS_DOWN) {
2697 		DHD_ERROR(("%s bus is down\n", __FUNCTION__));
2698 		return BCME_ERROR;
2699 	}
2700 #ifdef DHD_PCIE_RUNTIMEPM
2701 	if (dhdp->memdump_type == DUMP_TYPE_BY_SYSDUMP) {
2702 		DHD_ERROR(("%s : bus wakeup by SYSDUMP\n", __FUNCTION__));
2703 		dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
2704 	}
2705 #endif /* DHD_PCIE_RUNTIMEPM */
2706 
2707 	if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
2708 		DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
2709 			__FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
2710 		return BCME_ERROR;
2711 	}
2712 
2713 	return dhdpcie_mem_dump(bus);
2714 }
2715 
2716 int
dhd_dongle_mem_dump(void)2717 dhd_dongle_mem_dump(void)
2718 {
2719 	if (!g_dhd_bus) {
2720 		DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__));
2721 		return -ENODEV;
2722 	}
2723 
2724 	dhd_bus_dump_console_buffer(g_dhd_bus);
2725 	dhd_prot_debug_info_print(g_dhd_bus->dhd);
2726 
2727 	g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
2728 	g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS;
2729 
2730 #ifdef DHD_PCIE_RUNTIMEPM
2731 	dhdpcie_runtime_bus_wake(g_dhd_bus->dhd, TRUE, __builtin_return_address(0));
2732 #endif /* DHD_PCIE_RUNTIMEPM */
2733 
2734 	DHD_OS_WAKE_LOCK(g_dhd_bus->dhd);
2735 	dhd_bus_mem_dump(g_dhd_bus->dhd);
2736 	DHD_OS_WAKE_UNLOCK(g_dhd_bus->dhd);
2737 	return 0;
2738 }
2739 EXPORT_SYMBOL(dhd_dongle_mem_dump);
2740 #endif	/* DHD_FW_COREDUMP */
2741 
2742 int
dhd_socram_dump(dhd_bus_t * bus)2743 dhd_socram_dump(dhd_bus_t *bus)
2744 {
2745 #ifdef DHD_PCIE_RUNTIMEPM
2746 	dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0));
2747 #endif /* DHD_PCIE_RUNTIMEPM */
2748 
2749 #if defined(DHD_FW_COREDUMP)
2750 	DHD_OS_WAKE_LOCK(bus->dhd);
2751 	dhd_bus_mem_dump(bus->dhd);
2752 	DHD_OS_WAKE_UNLOCK(bus->dhd);
2753 	return 0;
2754 #else
2755 	return -1;
2756 #endif
2757 }
2758 
2759 /**
2760  * Transfers bytes from host to dongle using pio mode.
2761  * Parameter 'address' is a backplane address.
2762  */
2763 static int
dhdpcie_bus_membytes(dhd_bus_t * bus,bool write,ulong address,uint8 * data,uint size)2764 dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size)
2765 {
2766 	uint dsize;
2767 	int detect_endian_flag = 0x01;
2768 	bool little_endian;
2769 
2770 	if (write && bus->is_linkdown) {
2771 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
2772 		return BCME_ERROR;
2773 	}
2774 
2775 
2776 	/* Detect endianness. */
2777 	little_endian = *(char *)&detect_endian_flag;
2778 
2779 	/* In remap mode, adjust address beyond socram and redirect
2780 	 * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
2781 	 * is not backplane accessible
2782 	 */
2783 
2784 	/* Determine initial transfer parameters */
2785 #ifdef DHD_SUPPORT_64BIT
2786 	dsize = sizeof(uint64);
2787 #else /* !DHD_SUPPORT_64BIT */
2788 	dsize = sizeof(uint32);
2789 #endif /* DHD_SUPPORT_64BIT */
2790 
2791 	/* Do the transfer(s) */
2792 	DHD_INFO(("%s: %s %d bytes in window 0x%08lx\n",
2793 	          __FUNCTION__, (write ? "write" : "read"), size, address));
2794 	if (write) {
2795 		while (size) {
2796 #ifdef DHD_SUPPORT_64BIT
2797 			if (size >= sizeof(uint64) && little_endian &&	!(address % 8)) {
2798 				dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
2799 			}
2800 #else /* !DHD_SUPPORT_64BIT */
2801 			if (size >= sizeof(uint32) && little_endian &&	!(address % 4)) {
2802 				dhdpcie_bus_wtcm32(bus, address, *((uint32*)data));
2803 			}
2804 #endif /* DHD_SUPPORT_64BIT */
2805 			else {
2806 				dsize = sizeof(uint8);
2807 				dhdpcie_bus_wtcm8(bus, address, *data);
2808 			}
2809 
2810 			/* Adjust for next transfer (if any) */
2811 			if ((size -= dsize)) {
2812 				data += dsize;
2813 				address += dsize;
2814 			}
2815 		}
2816 	} else {
2817 		while (size) {
2818 #ifdef DHD_SUPPORT_64BIT
2819 			if (size >= sizeof(uint64) && little_endian &&	!(address % 8))
2820 			{
2821 				*(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
2822 			}
2823 #else /* !DHD_SUPPORT_64BIT */
2824 			if (size >= sizeof(uint32) && little_endian &&	!(address % 4))
2825 			{
2826 				*(uint32 *)data = dhdpcie_bus_rtcm32(bus, address);
2827 			}
2828 #endif /* DHD_SUPPORT_64BIT */
2829 			else {
2830 				dsize = sizeof(uint8);
2831 				*data = dhdpcie_bus_rtcm8(bus, address);
2832 			}
2833 
2834 			/* Adjust for next transfer (if any) */
2835 			if ((size -= dsize) > 0) {
2836 				data += dsize;
2837 				address += dsize;
2838 			}
2839 		}
2840 	}
2841 	return BCME_OK;
2842 } /* dhdpcie_bus_membytes */
2843 
2844 /**
2845  * Transfers one transmit (ethernet) packet that was queued in the (flow controlled) flow ring queue
2846  * to the (non flow controlled) flow ring.
2847  */
2848 int BCMFASTPATH
dhd_bus_schedule_queue(struct dhd_bus * bus,uint16 flow_id,bool txs)2849 dhd_bus_schedule_queue(struct dhd_bus  *bus, uint16 flow_id, bool txs)
2850 {
2851 	flow_ring_node_t *flow_ring_node;
2852 	int ret = BCME_OK;
2853 #ifdef DHD_LOSSLESS_ROAMING
2854 	dhd_pub_t *dhdp = bus->dhd;
2855 #endif
2856 	DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
2857 
2858 	/* ASSERT on flow_id */
2859 	if (flow_id >= bus->max_submission_rings) {
2860 		DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__,
2861 			flow_id, bus->max_submission_rings));
2862 		return 0;
2863 	}
2864 
2865 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);
2866 
2867 #ifdef DHD_LOSSLESS_ROAMING
2868 	if ((dhdp->dequeue_prec_map & (1 << flow_ring_node->flow_info.tid)) == 0) {
2869 		DHD_INFO(("%s: tid %d is not in precedence map. block scheduling\n",
2870 			__FUNCTION__, flow_ring_node->flow_info.tid));
2871 		return BCME_OK;
2872 	}
2873 #endif /* DHD_LOSSLESS_ROAMING */
2874 
2875 	{
2876 		unsigned long flags;
2877 		void *txp = NULL;
2878 		flow_queue_t *queue;
2879 #ifdef DHD_LOSSLESS_ROAMING
2880 		struct ether_header *eh;
2881 		uint8 *pktdata;
2882 #endif /* DHD_LOSSLESS_ROAMING */
2883 
2884 		queue = &flow_ring_node->queue; /* queue associated with flow ring */
2885 
2886 		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
2887 
2888 		if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
2889 			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
2890 			return BCME_NOTREADY;
2891 		}
2892 
2893 		while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
2894 			if (bus->dhd->conf->orphan_move <= 1)
2895 				PKTORPHAN(txp, bus->dhd->conf->tsq);
2896 
2897 			/*
2898 			 * Modifying the packet length caused P2P cert failures.
2899 			 * Specifically on test cases where a packet of size 52 bytes
2900 			 * was injected, the sniffer capture showed 62 bytes because of
2901 			 * which the cert tests failed. So making the below change
2902 			 * only Router specific.
2903 			 */
2904 
2905 #ifdef DHDTCPACK_SUPPRESS
2906 			if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_HOLD) {
2907 				ret = dhd_tcpack_check_xmit(bus->dhd, txp);
2908 				if (ret != BCME_OK) {
2909 					DHD_ERROR(("%s: dhd_tcpack_check_xmit() error.\n",
2910 						__FUNCTION__));
2911 				}
2912 			}
2913 #endif /* DHDTCPACK_SUPPRESS */
2914 #ifdef DHD_LOSSLESS_ROAMING
2915 			pktdata = (uint8 *)PKTDATA(OSH_NULL, txp);
2916 			eh = (struct ether_header *) pktdata;
2917 			if (eh->ether_type == hton16(ETHER_TYPE_802_1X)) {
2918 				uint8 prio = (uint8)PKTPRIO(txp);
2919 
2920 				/* Restore to original priority for 802.1X packet */
2921 				if (prio == PRIO_8021D_NC) {
2922 					PKTSETPRIO(txp, dhdp->prio_8021x);
2923 				}
2924 			}
2925 #endif /* DHD_LOSSLESS_ROAMING */
2926 
2927 			/* Attempt to transfer packet over flow ring */
2928 			ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
2929 			if (ret != BCME_OK) { /* may not have resources in flow ring */
2930 				DHD_INFO(("%s: Reinserrt %d\n", __FUNCTION__, ret));
2931 				dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
2932 				/* reinsert at head */
2933 				dhd_flow_queue_reinsert(bus->dhd, queue, txp);
2934 				DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
2935 
2936 				/* If we are able to requeue back, return success */
2937 				return BCME_OK;
2938 			}
2939 		}
2940 
2941 		dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
2942 
2943 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
2944 	}
2945 
2946 	return ret;
2947 } /* dhd_bus_schedule_queue */
2948 
2949 /** Sends an (ethernet) data frame (in 'txp') to the dongle. Callee disposes of txp. */
2950 int BCMFASTPATH
dhd_bus_txdata(struct dhd_bus * bus,void * txp,uint8 ifidx)2951 dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
2952 {
2953 	uint16 flowid;
2954 #ifdef IDLE_TX_FLOW_MGMT
2955 	uint8	node_status;
2956 #endif /* IDLE_TX_FLOW_MGMT */
2957 	flow_queue_t *queue;
2958 	flow_ring_node_t *flow_ring_node;
2959 	unsigned long flags;
2960 	int ret = BCME_OK;
2961 	void *txp_pend = NULL;
2962 
2963 	if (!bus->dhd->flowid_allocator) {
2964 		DHD_ERROR(("%s: Flow ring not intited yet  \n", __FUNCTION__));
2965 		goto toss;
2966 	}
2967 
2968 	flowid = DHD_PKT_GET_FLOWID(txp);
2969 
2970 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
2971 
2972 	DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
2973 		__FUNCTION__, flowid, flow_ring_node->status, flow_ring_node->active));
2974 
2975 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
2976 	if ((flowid >= bus->dhd->num_flow_rings) ||
2977 #ifdef IDLE_TX_FLOW_MGMT
2978 		(!flow_ring_node->active))
2979 #else
2980 		(!flow_ring_node->active) ||
2981 		(flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) ||
2982 		(flow_ring_node->status == FLOW_RING_STATUS_STA_FREEING))
2983 #endif /* IDLE_TX_FLOW_MGMT */
2984 	{
2985 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
2986 		DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
2987 			__FUNCTION__, flowid, flow_ring_node->status,
2988 			flow_ring_node->active));
2989 		ret = BCME_ERROR;
2990 			goto toss;
2991 	}
2992 
2993 #ifdef IDLE_TX_FLOW_MGMT
2994 	node_status = flow_ring_node->status;
2995 
2996 	/* handle diffrent status states here!! */
2997 	switch (node_status)
2998 	{
2999 		case FLOW_RING_STATUS_OPEN:
3000 
3001 			if (bus->enable_idle_flowring_mgmt) {
3002 				/* Move the node to the head of active list */
3003 				dhd_flow_ring_move_to_active_list_head(bus, flow_ring_node);
3004 			}
3005 			break;
3006 
3007 		case FLOW_RING_STATUS_SUSPENDED:
3008 			DHD_INFO(("Need to Initiate TX Flow resume\n"));
3009 			/* Issue resume_ring request */
3010 			dhd_bus_flow_ring_resume_request(bus,
3011 					flow_ring_node);
3012 			break;
3013 
3014 		case FLOW_RING_STATUS_CREATE_PENDING:
3015 		case FLOW_RING_STATUS_RESUME_PENDING:
3016 			/* Dont do anything here!! */
3017 			DHD_INFO(("Waiting for Flow create/resume! status is %u\n",
3018 				node_status));
3019 			break;
3020 
3021 		case FLOW_RING_STATUS_DELETE_PENDING:
3022 		default:
3023 			DHD_ERROR(("Dropping packet!! flowid %u status is %u\n",
3024 				flowid, node_status));
3025 			/* error here!! */
3026 			ret = BCME_ERROR;
3027 			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3028 			goto toss;
3029 	}
3030 	/* Now queue the packet */
3031 #endif /* IDLE_TX_FLOW_MGMT */
3032 
3033 	queue = &flow_ring_node->queue; /* queue associated with flow ring */
3034 
3035 	if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK)
3036 		txp_pend = txp;
3037 
3038 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3039 
3040 	if (flow_ring_node->status) {
3041 		DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
3042 		    __FUNCTION__, flowid, flow_ring_node->status,
3043 		    flow_ring_node->active));
3044 		if (txp_pend) {
3045 			txp = txp_pend;
3046 			goto toss;
3047 		}
3048 		return BCME_OK;
3049 	}
3050 	ret = dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
3051 
3052 	/* If we have anything pending, try to push into q */
3053 	if (txp_pend) {
3054 		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
3055 
3056 		if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
3057 			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3058 			txp = txp_pend;
3059 			goto toss;
3060 		}
3061 
3062 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
3063 	}
3064 
3065 	return ret;
3066 
3067 toss:
3068 	DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
3069 /* for EFI, pass the 'send' flag as false, to avoid enqueuing the failed tx pkt
3070 * into the Tx done queue
3071 */
3072 #ifdef DHD_EFI
3073 	PKTCFREE(bus->dhd->osh, txp, FALSE);
3074 #else
3075 	PKTCFREE(bus->dhd->osh, txp, TRUE);
3076 #endif
3077 	return ret;
3078 } /* dhd_bus_txdata */
3079 
3080 
3081 void
dhd_bus_stop_queue(struct dhd_bus * bus)3082 dhd_bus_stop_queue(struct dhd_bus *bus)
3083 {
3084 	dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
3085 	bus->bus_flowctrl = TRUE;
3086 }
3087 
3088 void
dhd_bus_start_queue(struct dhd_bus * bus)3089 dhd_bus_start_queue(struct dhd_bus *bus)
3090 {
3091 	dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
3092 	bus->bus_flowctrl = TRUE;
3093 }
3094 
3095 /* Device console input function */
dhd_bus_console_in(dhd_pub_t * dhd,uchar * msg,uint msglen)3096 int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
3097 {
3098 	dhd_bus_t *bus = dhd->bus;
3099 	uint32 addr, val;
3100 	int rv;
3101 	/* Address could be zero if CONSOLE := 0 in dongle Makefile */
3102 	if (bus->console_addr == 0)
3103 		return BCME_UNSUPPORTED;
3104 
3105 	/* Don't allow input if dongle is in reset */
3106 	if (bus->dhd->dongle_reset) {
3107 		return BCME_NOTREADY;
3108 	}
3109 
3110 	/* Zero cbuf_index */
3111 	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
3112 	val = htol32(0);
3113 	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
3114 		goto done;
3115 
3116 	/* Write message into cbuf */
3117 	addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
3118 	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
3119 		goto done;
3120 
3121 	/* Write length into vcons_in */
3122 	addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
3123 	val = htol32(msglen);
3124 	if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
3125 		goto done;
3126 
3127 	/* generate an interrupt to dongle to indicate that it needs to process cons command */
3128 	dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);
3129 done:
3130 	return rv;
3131 } /* dhd_bus_console_in */
3132 
3133 /**
3134  * Called on frame reception, the frame was received from the dongle on interface 'ifidx' and is
3135  * contained in 'pkt'. Processes rx frame, forwards up the layer to netif.
3136  */
3137 void BCMFASTPATH
dhd_bus_rx_frame(struct dhd_bus * bus,void * pkt,int ifidx,uint pkt_count)3138 dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count)
3139 {
3140 	dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
3141 }
3142 
3143 /** 'offset' is a backplane address */
3144 void
dhdpcie_bus_wtcm8(dhd_bus_t * bus,ulong offset,uint8 data)3145 dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
3146 {
3147 	W_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset), data);
3148 }
3149 
3150 uint8
dhdpcie_bus_rtcm8(dhd_bus_t * bus,ulong offset)3151 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
3152 {
3153 	volatile uint8 data;
3154 	data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset));
3155 	return data;
3156 }
3157 
3158 void
dhdpcie_bus_wtcm32(dhd_bus_t * bus,ulong offset,uint32 data)3159 dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
3160 {
3161 	W_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset), data);
3162 }
3163 void
dhdpcie_bus_wtcm16(dhd_bus_t * bus,ulong offset,uint16 data)3164 dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
3165 {
3166 	W_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset), data);
3167 }
3168 #ifdef DHD_SUPPORT_64BIT
3169 void
dhdpcie_bus_wtcm64(dhd_bus_t * bus,ulong offset,uint64 data)3170 dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
3171 {
3172 	W_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset), data);
3173 }
3174 #endif /* DHD_SUPPORT_64BIT */
3175 
3176 uint16
dhdpcie_bus_rtcm16(dhd_bus_t * bus,ulong offset)3177 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
3178 {
3179 	volatile uint16 data;
3180 	data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset));
3181 	return data;
3182 }
3183 
3184 uint32
dhdpcie_bus_rtcm32(dhd_bus_t * bus,ulong offset)3185 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
3186 {
3187 	volatile uint32 data;
3188 	data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset));
3189 	return data;
3190 }
3191 
3192 #ifdef DHD_SUPPORT_64BIT
3193 uint64
dhdpcie_bus_rtcm64(dhd_bus_t * bus,ulong offset)3194 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
3195 {
3196 	volatile uint64 data;
3197 	data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
3198 	return data;
3199 }
3200 #endif /* DHD_SUPPORT_64BIT */
3201 
3202 /** A snippet of dongle memory is shared between host and dongle */
3203 void
dhd_bus_cmn_writeshared(dhd_bus_t * bus,void * data,uint32 len,uint8 type,uint16 ringid)3204 dhd_bus_cmn_writeshared(dhd_bus_t *bus, void *data, uint32 len, uint8 type, uint16 ringid)
3205 {
3206 	uint64 long_data;
3207 	uintptr tcm_offset;
3208 
3209 	DHD_INFO(("%s: writing to dongle type %d len %d\n", __FUNCTION__, type, len));
3210 
3211 	if (bus->is_linkdown) {
3212 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
3213 		return;
3214 	}
3215 
3216 	switch (type) {
3217 		case D2H_DMA_SCRATCH_BUF:
3218 		{
3219 			pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
3220 			long_data = HTOL64(*(uint64 *)data);
3221 			tcm_offset = (uintptr)&(sh->host_dma_scratch_buffer);
3222 			dhdpcie_bus_membytes(bus, TRUE,
3223 				(ulong)tcm_offset, (uint8*) &long_data, len);
3224 			if (dhd_msg_level & DHD_INFO_VAL) {
3225 				prhex(__FUNCTION__, data, len);
3226 			}
3227 			break;
3228 		}
3229 
3230 		case D2H_DMA_SCRATCH_BUF_LEN :
3231 		{
3232 			pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
3233 			tcm_offset = (uintptr)&(sh->host_dma_scratch_buffer_len);
3234 			dhdpcie_bus_wtcm32(bus,
3235 				(ulong)tcm_offset, (uint32) HTOL32(*(uint32 *)data));
3236 			if (dhd_msg_level & DHD_INFO_VAL) {
3237 				prhex(__FUNCTION__, data, len);
3238 			}
3239 			break;
3240 		}
3241 
3242 		case H2D_DMA_INDX_WR_BUF:
3243 		{
3244 			pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
3245 
3246 			long_data = HTOL64(*(uint64 *)data);
3247 			tcm_offset = (uintptr)shmem->rings_info_ptr;
3248 			tcm_offset += OFFSETOF(ring_info_t, h2d_w_idx_hostaddr);
3249 			dhdpcie_bus_membytes(bus, TRUE,
3250 				(ulong)tcm_offset, (uint8*) &long_data, len);
3251 			if (dhd_msg_level & DHD_INFO_VAL) {
3252 				prhex(__FUNCTION__, data, len);
3253 			}
3254 			break;
3255 		}
3256 
3257 		case H2D_DMA_INDX_RD_BUF:
3258 		{
3259 			pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
3260 			long_data = HTOL64(*(uint64 *)data);
3261 			tcm_offset = (uintptr)shmem->rings_info_ptr;
3262 			tcm_offset += OFFSETOF(ring_info_t, h2d_r_idx_hostaddr);
3263 			dhdpcie_bus_membytes(bus, TRUE,
3264 				(ulong)tcm_offset, (uint8*) &long_data, len);
3265 			if (dhd_msg_level & DHD_INFO_VAL) {
3266 				prhex(__FUNCTION__, data, len);
3267 			}
3268 			break;
3269 		}
3270 
3271 		case D2H_DMA_INDX_WR_BUF:
3272 		{
3273 			pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
3274 			long_data = HTOL64(*(uint64 *)data);
3275 			tcm_offset = (uintptr)shmem->rings_info_ptr;
3276 			tcm_offset += OFFSETOF(ring_info_t, d2h_w_idx_hostaddr);
3277 			dhdpcie_bus_membytes(bus, TRUE,
3278 				(ulong)tcm_offset, (uint8*) &long_data, len);
3279 			if (dhd_msg_level & DHD_INFO_VAL) {
3280 				prhex(__FUNCTION__, data, len);
3281 			}
3282 			break;
3283 		}
3284 
3285 		case D2H_DMA_INDX_RD_BUF:
3286 		{
3287 			pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
3288 			long_data = HTOL64(*(uint64 *)data);
3289 			tcm_offset = (uintptr)shmem->rings_info_ptr;
3290 			tcm_offset += OFFSETOF(ring_info_t, d2h_r_idx_hostaddr);
3291 			dhdpcie_bus_membytes(bus, TRUE,
3292 				(ulong)tcm_offset, (uint8*) &long_data, len);
3293 			if (dhd_msg_level & DHD_INFO_VAL) {
3294 				prhex(__FUNCTION__, data, len);
3295 			}
3296 			break;
3297 		}
3298 
3299 		case H2D_IFRM_INDX_WR_BUF:
3300 		{
3301 			pciedev_shared_t *shmem = (pciedev_shared_t *)bus->pcie_sh;
3302 
3303 			long_data = HTOL64(*(uint64 *)data);
3304 			tcm_offset = (uintptr)shmem->rings_info_ptr;
3305 			tcm_offset += OFFSETOF(ring_info_t, ifrm_w_idx_hostaddr);
3306 			dhdpcie_bus_membytes(bus, TRUE,
3307 				(ulong)tcm_offset, (uint8*) &long_data, len);
3308 			if (dhd_msg_level & DHD_INFO_VAL) {
3309 				prhex(__FUNCTION__, data, len);
3310 			}
3311 			break;
3312 		}
3313 
3314 		case RING_ITEM_LEN :
3315 			tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
3316 			tcm_offset += OFFSETOF(ring_mem_t, len_items);
3317 			dhdpcie_bus_wtcm16(bus,
3318 				(ulong)tcm_offset, (uint16) HTOL16(*(uint16 *)data));
3319 			break;
3320 
3321 		case RING_MAX_ITEMS :
3322 			tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
3323 			tcm_offset += OFFSETOF(ring_mem_t, max_item);
3324 			dhdpcie_bus_wtcm16(bus,
3325 				(ulong)tcm_offset, (uint16) HTOL16(*(uint16 *)data));
3326 			break;
3327 
3328 		case RING_BUF_ADDR :
3329 			long_data = HTOL64(*(uint64 *)data);
3330 			tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
3331 			tcm_offset += OFFSETOF(ring_mem_t, base_addr);
3332 			dhdpcie_bus_membytes(bus, TRUE,
3333 				(ulong)tcm_offset, (uint8 *) &long_data, len);
3334 			if (dhd_msg_level & DHD_INFO_VAL) {
3335 				prhex(__FUNCTION__, data, len);
3336 			}
3337 			break;
3338 
3339 		case RING_WR_UPD :
3340 			tcm_offset = bus->ring_sh[ringid].ring_state_w;
3341 			dhdpcie_bus_wtcm16(bus,
3342 				(ulong)tcm_offset, (uint16) HTOL16(*(uint16 *)data));
3343 			break;
3344 
3345 		case RING_RD_UPD :
3346 			tcm_offset = bus->ring_sh[ringid].ring_state_r;
3347 			dhdpcie_bus_wtcm16(bus,
3348 				(ulong)tcm_offset, (uint16) HTOL16(*(uint16 *)data));
3349 			break;
3350 
3351 		case D2H_MB_DATA:
3352 			dhdpcie_bus_wtcm32(bus, bus->d2h_mb_data_ptr_addr,
3353 				(uint32) HTOL32(*(uint32 *)data));
3354 			break;
3355 
3356 		case H2D_MB_DATA:
3357 			dhdpcie_bus_wtcm32(bus, bus->h2d_mb_data_ptr_addr,
3358 				(uint32) HTOL32(*(uint32 *)data));
3359 			break;
3360 
3361 		case HOST_API_VERSION:
3362 		{
3363 			pciedev_shared_t *sh = (pciedev_shared_t*) bus->shared_addr;
3364 			tcm_offset = (uintptr)sh + OFFSETOF(pciedev_shared_t, host_cap);
3365 			dhdpcie_bus_wtcm32(bus,
3366 				(ulong)tcm_offset, (uint32) HTOL32(*(uint32 *)data));
3367 			break;
3368 		}
3369 
3370 		case DNGL_TO_HOST_TRAP_ADDR:
3371 		{
3372 			pciedev_shared_t *sh = (pciedev_shared_t*) bus->shared_addr;
3373 			long_data = HTOL64(*(uint64 *)data);
3374 			tcm_offset = (uintptr)&(sh->host_trap_addr);
3375 			dhdpcie_bus_membytes(bus, TRUE,
3376 				(ulong)tcm_offset, (uint8*) &long_data, len);
3377 			break;
3378 		}
3379 
3380 #ifdef HOFFLOAD_MODULES
3381 		case WRT_HOST_MODULE_ADDR:
3382 		{
3383 			pciedev_shared_t *sh = (pciedev_shared_t*) bus->shared_addr;
3384 			long_data = HTOL64(*(uint64 *)data);
3385 			tcm_offset = (uintptr)&(sh->hoffload_addr);
3386 			dhdpcie_bus_membytes(bus, TRUE,
3387 				(ulong)tcm_offset, (uint8*) &long_data, len);
3388 			break;
3389 		}
3390 #endif
3391 		default:
3392 			break;
3393 	}
3394 } /* dhd_bus_cmn_writeshared */
3395 
3396 /** A snippet of dongle memory is shared between host and dongle */
3397 void
dhd_bus_cmn_readshared(dhd_bus_t * bus,void * data,uint8 type,uint16 ringid)3398 dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
3399 {
3400 	ulong tcm_offset;
3401 
3402 	switch (type) {
3403 		case RING_WR_UPD :
3404 			tcm_offset = bus->ring_sh[ringid].ring_state_w;
3405 			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
3406 			break;
3407 		case RING_RD_UPD :
3408 			tcm_offset = bus->ring_sh[ringid].ring_state_r;
3409 			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
3410 			break;
3411 		case TOTAL_LFRAG_PACKET_CNT :
3412 		{
3413 			pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
3414 			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
3415 				(ulong)(uintptr) &sh->total_lfrag_pkt_cnt));
3416 			break;
3417 		}
3418 		case H2D_MB_DATA:
3419 			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->h2d_mb_data_ptr_addr));
3420 			break;
3421 		case D2H_MB_DATA:
3422 			*(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->d2h_mb_data_ptr_addr));
3423 			break;
3424 		case MAX_HOST_RXBUFS :
3425 		{
3426 			pciedev_shared_t *sh = (pciedev_shared_t*)bus->shared_addr;
3427 			*(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
3428 				(ulong)(uintptr) &sh->max_host_rxbufs));
3429 			break;
3430 		}
3431 		default :
3432 			break;
3433 	}
3434 }
3435 
dhd_bus_get_sharedflags(dhd_bus_t * bus)3436 uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus)
3437 {
3438 	return ((pciedev_shared_t*)bus->pcie_sh)->flags;
3439 }
3440 
3441 void
dhd_bus_clearcounts(dhd_pub_t * dhdp)3442 dhd_bus_clearcounts(dhd_pub_t *dhdp)
3443 {
3444 }
3445 
3446 int
dhd_bus_iovar_op(dhd_pub_t * dhdp,const char * name,void * params,int plen,void * arg,int len,bool set)3447 dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
3448                  void *params, int plen, void *arg, int len, bool set)
3449 {
3450 	dhd_bus_t *bus = dhdp->bus;
3451 	const bcm_iovar_t *vi = NULL;
3452 	int bcmerror = BCME_UNSUPPORTED;
3453 	int val_size;
3454 	uint32 actionid;
3455 
3456 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3457 
3458 	ASSERT(name);
3459 	ASSERT(len >= 0);
3460 
3461 	/* Get MUST have return space */
3462 	ASSERT(set || (arg && len));
3463 
3464 	/* Set does NOT take qualifiers */
3465 	ASSERT(!set || (!params && !plen));
3466 
3467 	DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
3468 	         name, (set ? "set" : "get"), len, plen));
3469 
3470 	/* Look up var locally; if not found pass to host driver */
3471 	if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
3472 		goto exit;
3473 	}
3474 
3475 
3476 	/* set up 'params' pointer in case this is a set command so that
3477 	 * the convenience int and bool code can be common to set and get
3478 	 */
3479 	if (params == NULL) {
3480 		params = arg;
3481 		plen = len;
3482 	}
3483 
3484 	if (vi->type == IOVT_VOID)
3485 		val_size = 0;
3486 	else if (vi->type == IOVT_BUFFER)
3487 		val_size = len;
3488 	else
3489 		/* all other types are integer sized */
3490 		val_size = sizeof(int);
3491 
3492 	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
3493 	bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
3494 
3495 exit:
3496 	return bcmerror;
3497 } /* dhd_bus_iovar_op */
3498 
3499 #ifdef BCM_BUZZZ
3500 #include <bcm_buzzz.h>
3501 
3502 int
dhd_buzzz_dump_cntrs(char * p,uint32 * core,uint32 * log,const int num_counters)3503 dhd_buzzz_dump_cntrs(char *p, uint32 *core, uint32 *log,
3504 	const int num_counters)
3505 {
3506 	int bytes = 0;
3507 	uint32 ctr;
3508 	uint32 curr[BCM_BUZZZ_COUNTERS_MAX], prev[BCM_BUZZZ_COUNTERS_MAX];
3509 	uint32 delta[BCM_BUZZZ_COUNTERS_MAX];
3510 
3511 	/* Compute elapsed counter values per counter event type */
3512 	for (ctr = 0U; ctr < num_counters; ctr++) {
3513 		prev[ctr] = core[ctr];
3514 		curr[ctr] = *log++;
3515 		core[ctr] = curr[ctr];  /* saved for next log */
3516 
3517 		if (curr[ctr] < prev[ctr])
3518 			delta[ctr] = curr[ctr] + (~0U - prev[ctr]);
3519 		else
3520 			delta[ctr] = (curr[ctr] - prev[ctr]);
3521 
3522 		bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
3523 	}
3524 
3525 	return bytes;
3526 }
3527 
3528 typedef union cm3_cnts { /* export this in bcm_buzzz.h */
3529 	uint32 u32;
3530 	uint8  u8[4];
3531 	struct {
3532 		uint8 cpicnt;
3533 		uint8 exccnt;
3534 		uint8 sleepcnt;
3535 		uint8 lsucnt;
3536 	};
3537 } cm3_cnts_t;
3538 
3539 int
dhd_bcm_buzzz_dump_cntrs6(char * p,uint32 * core,uint32 * log)3540 dhd_bcm_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 *log)
3541 {
3542 	int bytes = 0;
3543 
3544 	uint32 cyccnt, instrcnt;
3545 	cm3_cnts_t cm3_cnts;
3546 	uint8 foldcnt;
3547 
3548 	{   /* 32bit cyccnt */
3549 		uint32 curr, prev, delta;
3550 		prev = core[0]; curr = *log++; core[0] = curr;
3551 		if (curr < prev)
3552 			delta = curr + (~0U - prev);
3553 		else
3554 			delta = (curr - prev);
3555 
3556 		bytes += sprintf(p + bytes, "%12u ", delta);
3557 		cyccnt = delta;
3558 	}
3559 
3560 	{	/* Extract the 4 cnts: cpi, exc, sleep and lsu */
3561 		int i;
3562 		uint8 max8 = ~0;
3563 		cm3_cnts_t curr, prev, delta;
3564 		prev.u32 = core[1]; curr.u32 = * log++; core[1] = curr.u32;
3565 		for (i = 0; i < 4; i++) {
3566 			if (curr.u8[i] < prev.u8[i])
3567 				delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
3568 			else
3569 				delta.u8[i] = (curr.u8[i] - prev.u8[i]);
3570 			bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
3571 		}
3572 		cm3_cnts.u32 = delta.u32;
3573 	}
3574 
3575 	{   /* Extract the foldcnt from arg0 */
3576 		uint8 curr, prev, delta, max8 = ~0;
3577 		bcm_buzzz_arg0_t arg0; arg0.u32 = *log;
3578 		prev = core[2]; curr = arg0.klog.cnt; core[2] = curr;
3579 		if (curr < prev)
3580 			delta = curr + (max8 - prev);
3581 		else
3582 			delta = (curr - prev);
3583 		bytes += sprintf(p + bytes, "%4u ", delta);
3584 		foldcnt = delta;
3585 	}
3586 
3587 	instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2]
3588 		                 + cm3_cnts.u8[3]) + foldcnt;
3589 	if (instrcnt > 0xFFFFFF00)
3590 		bytes += sprintf(p + bytes, "[%10s] ", "~");
3591 	else
3592 		bytes += sprintf(p + bytes, "[%10u] ", instrcnt);
3593 	return bytes;
3594 }
3595 
3596 int
dhd_buzzz_dump_log(char * p,uint32 * core,uint32 * log,bcm_buzzz_t * buzzz)3597 dhd_buzzz_dump_log(char *p, uint32 *core, uint32 *log, bcm_buzzz_t *buzzz)
3598 {
3599 	int bytes = 0;
3600 	bcm_buzzz_arg0_t arg0;
3601 	static uint8 * fmt[] = BCM_BUZZZ_FMT_STRINGS;
3602 
3603 	if (buzzz->counters == 6) {
3604 		bytes += dhd_bcm_buzzz_dump_cntrs6(p, core, log);
3605 		log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
3606 	} else {
3607 		bytes += dhd_buzzz_dump_cntrs(p, core, log, buzzz->counters);
3608 		log += buzzz->counters; /* (N x 32bit) CR4=3, CA7=4 */
3609 	}
3610 
3611 	/* Dump the logged arguments using the registered formats */
3612 	arg0.u32 = *log++;
3613 
3614 	switch (arg0.klog.args) {
3615 		case 0:
3616 			bytes += sprintf(p + bytes, fmt[arg0.klog.id]);
3617 			break;
3618 		case 1:
3619 		{
3620 			uint32 arg1 = *log++;
3621 			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
3622 			break;
3623 		}
3624 		case 2:
3625 		{
3626 			uint32 arg1, arg2;
3627 			arg1 = *log++; arg2 = *log++;
3628 			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2);
3629 			break;
3630 		}
3631 		case 3:
3632 		{
3633 			uint32 arg1, arg2, arg3;
3634 			arg1 = *log++; arg2 = *log++; arg3 = *log++;
3635 			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3);
3636 			break;
3637 		}
3638 		case 4:
3639 		{
3640 			uint32 arg1, arg2, arg3, arg4;
3641 			arg1 = *log++; arg2 = *log++;
3642 			arg3 = *log++; arg4 = *log++;
3643 			bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1, arg2, arg3, arg4);
3644 			break;
3645 		}
3646 		default:
3647 			printf("%s: Maximum one argument supported\n", __FUNCTION__);
3648 			break;
3649 	}
3650 
3651 	bytes += sprintf(p + bytes, "\n");
3652 
3653 	return bytes;
3654 }
3655 
dhd_buzzz_dump(bcm_buzzz_t * buzzz_p,void * buffer_p,char * p)3656 void dhd_buzzz_dump(bcm_buzzz_t *buzzz_p, void *buffer_p, char *p)
3657 {
3658 	int i;
3659 	uint32 total, part1, part2, log_sz, core[BCM_BUZZZ_COUNTERS_MAX];
3660 	void * log;
3661 
3662 	for (i = 0; i < BCM_BUZZZ_COUNTERS_MAX; i++) {
3663 		core[i] = 0;
3664 	}
3665 
3666 	log_sz = buzzz_p->log_sz;
3667 
3668 	part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz;
3669 
3670 	if (buzzz_p->wrap == TRUE) {
3671 		part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
3672 		total = (buzzz_p->buffer_sz - BCM_BUZZZ_LOGENTRY_MAXSZ) / log_sz;
3673 	} else {
3674 		part2 = 0U;
3675 		total = buzzz_p->count;
3676 	}
3677 
3678 	if (total == 0U) {
3679 		printf("%s: bcm_buzzz_dump total<%u> done\n", __FUNCTION__, total);
3680 		return;
3681 	} else {
3682 		printf("%s: bcm_buzzz_dump total<%u> : part2<%u> + part1<%u>\n", __FUNCTION__,
3683 		       total, part2, part1);
3684 	}
3685 
3686 	if (part2) {   /* with wrap */
3687 		log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log));
3688 		while (part2--) {   /* from cur to end : part2 */
3689 			p[0] = '\0';
3690 			dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
3691 			printf("%s", p);
3692 			log = (void*)((size_t)log + buzzz_p->log_sz);
3693 		}
3694 	}
3695 
3696 	log = (void*)buffer_p;
3697 	while (part1--) {
3698 		p[0] = '\0';
3699 		dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
3700 		printf("%s", p);
3701 		log = (void*)((size_t)log + buzzz_p->log_sz);
3702 	}
3703 
3704 	printf("%s: bcm_buzzz_dump done.\n", __FUNCTION__);
3705 }
3706 
dhd_buzzz_dump_dngl(dhd_bus_t * bus)3707 int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
3708 {
3709 	bcm_buzzz_t * buzzz_p = NULL;
3710 	void * buffer_p = NULL;
3711 	char * page_p = NULL;
3712 	pciedev_shared_t *sh;
3713 	int ret = 0;
3714 
3715 	if (bus->dhd->busstate != DHD_BUS_DATA) {
3716 		return BCME_UNSUPPORTED;
3717 	}
3718 	if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) {
3719 		printf("%s: Page memory allocation failure\n", __FUNCTION__);
3720 		goto done;
3721 	}
3722 	if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(bcm_buzzz_t))) == NULL) {
3723 		printf("%s: BCM BUZZZ memory allocation failure\n", __FUNCTION__);
3724 		goto done;
3725 	}
3726 
3727 	ret = dhdpcie_readshared(bus);
3728 	if (ret < 0) {
3729 		DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
3730 		goto done;
3731 	}
3732 
3733 	sh = bus->pcie_sh;
3734 
3735 	DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzz_dbg_ptr));
3736 
3737 	if (sh->buzz_dbg_ptr != 0U) {	/* Fetch and display dongle BUZZZ Trace */
3738 
3739 		dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzz_dbg_ptr,
3740 		                     (uint8 *)buzzz_p, sizeof(bcm_buzzz_t));
3741 
3742 		printf("BUZZZ[0x%08x]: log<0x%08x> cur<0x%08x> end<0x%08x> "
3743 			"count<%u> status<%u> wrap<%u>\n"
3744 			"cpu<0x%02X> counters<%u> group<%u> buffer_sz<%u> log_sz<%u>\n",
3745 			(int)sh->buzz_dbg_ptr,
3746 			(int)buzzz_p->log, (int)buzzz_p->cur, (int)buzzz_p->end,
3747 			buzzz_p->count, buzzz_p->status, buzzz_p->wrap,
3748 			buzzz_p->cpu_idcode, buzzz_p->counters, buzzz_p->group,
3749 			buzzz_p->buffer_sz, buzzz_p->log_sz);
3750 
3751 		if (buzzz_p->count == 0) {
3752 			printf("%s: Empty dongle BUZZZ trace\n\n", __FUNCTION__);
3753 			goto done;
3754 		}
3755 
3756 		/* Allocate memory for trace buffer and format strings */
3757 		buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
3758 		if (buffer_p == NULL) {
3759 			printf("%s: Buffer memory allocation failure\n", __FUNCTION__);
3760 			goto done;
3761 		}
3762 
3763 		/* Fetch the trace. format strings are exported via bcm_buzzz.h */
3764 		dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log,   /* Trace */
3765 		                     (uint8 *)buffer_p, buzzz_p->buffer_sz);
3766 
3767 		/* Process and display the trace using formatted output */
3768 
3769 		{
3770 			int ctr;
3771 			for (ctr = 0; ctr < buzzz_p->counters; ctr++) {
3772 				printf("<Evt[%02X]> ", buzzz_p->eventid[ctr]);
3773 			}
3774 			printf("<code execution point>\n");
3775 		}
3776 
3777 		dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
3778 
3779 		printf("%s: ----- End of dongle BCM BUZZZ Trace -----\n\n", __FUNCTION__);
3780 
3781 		MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
3782 	}
3783 
3784 done:
3785 
3786 	if (page_p)   MFREE(bus->dhd->osh, page_p, 4096);
3787 	if (buzzz_p)  MFREE(bus->dhd->osh, buzzz_p, sizeof(bcm_buzzz_t));
3788 	if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz);
3789 
3790 	return BCME_OK;
3791 }
3792 #endif /* BCM_BUZZZ */
3793 
3794 #define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) &&	\
3795 	((sih)->buscoretype == PCIE2_CORE_ID))
3796 
3797 int
dhd_bus_devreset(dhd_pub_t * dhdp,uint8 flag)3798 dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
3799 {
3800 	dhd_bus_t *bus = dhdp->bus;
3801 	int bcmerror = 0;
3802 	unsigned long flags;
3803 #ifdef CONFIG_ARCH_MSM
3804 	int retry = POWERUP_MAX_RETRY;
3805 #endif /* CONFIG_ARCH_MSM */
3806 
3807 	if (dhd_download_fw_on_driverload) {
3808 		bcmerror = dhd_bus_start(dhdp);
3809 	} else {
3810 		if (flag == TRUE) { /* Turn off WLAN */
3811 			/* Removing Power */
3812 			DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
3813 
3814 			bus->dhd->up = FALSE;
3815 
3816 			if (bus->dhd->busstate != DHD_BUS_DOWN) {
3817 				dhdpcie_advertise_bus_cleanup(bus->dhd);
3818 				if (bus->intr) {
3819 					dhdpcie_bus_intr_disable(bus);
3820 					dhdpcie_free_irq(bus);
3821 				}
3822 #ifdef BCMPCIE_OOB_HOST_WAKE
3823 				/* Clean up any pending host wake IRQ */
3824 				dhd_bus_oob_intr_set(bus->dhd, FALSE);
3825 				dhd_bus_oob_intr_unregister(bus->dhd);
3826 #endif /* BCMPCIE_OOB_HOST_WAKE */
3827 				dhd_os_wd_timer(dhdp, 0);
3828 				dhd_bus_stop(bus, TRUE);
3829 				dhd_prot_reset(dhdp);
3830 				dhd_clear(dhdp);
3831 				dhd_bus_release_dongle(bus);
3832 				dhdpcie_bus_free_resource(bus);
3833 				bcmerror = dhdpcie_bus_disable_device(bus);
3834 				if (bcmerror) {
3835 					DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
3836 						__FUNCTION__, bcmerror));
3837 					goto done;
3838 				}
3839 #ifdef CONFIG_ARCH_MSM
3840 				bcmerror = dhdpcie_bus_clock_stop(bus);
3841 				if (bcmerror) {
3842 					DHD_ERROR(("%s: host clock stop failed: %d\n",
3843 						__FUNCTION__, bcmerror));
3844 					goto done;
3845 				}
3846 #endif /* CONFIG_ARCH_MSM */
3847 				DHD_GENERAL_LOCK(bus->dhd, flags);
3848 				bus->dhd->busstate = DHD_BUS_DOWN;
3849 				DHD_GENERAL_UNLOCK(bus->dhd, flags);
3850 			} else {
3851 				if (bus->intr) {
3852 					dhdpcie_free_irq(bus);
3853 				}
3854 #ifdef BCMPCIE_OOB_HOST_WAKE
3855 				/* Clean up any pending host wake IRQ */
3856 				dhd_bus_oob_intr_set(bus->dhd, FALSE);
3857 				dhd_bus_oob_intr_unregister(bus->dhd);
3858 #endif /* BCMPCIE_OOB_HOST_WAKE */
3859 				dhd_dpc_kill(bus->dhd);
3860 				dhd_prot_reset(dhdp);
3861 				dhd_clear(dhdp);
3862 				dhd_bus_release_dongle(bus);
3863 				dhdpcie_bus_free_resource(bus);
3864 				bcmerror = dhdpcie_bus_disable_device(bus);
3865 				if (bcmerror) {
3866 					DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
3867 						__FUNCTION__, bcmerror));
3868 					goto done;
3869 				}
3870 
3871 #ifdef CONFIG_ARCH_MSM
3872 				bcmerror = dhdpcie_bus_clock_stop(bus);
3873 				if (bcmerror) {
3874 					DHD_ERROR(("%s: host clock stop failed: %d\n",
3875 						__FUNCTION__, bcmerror));
3876 					goto done;
3877 				}
3878 #endif  /* CONFIG_ARCH_MSM */
3879 			}
3880 
3881 			bus->dhd->dongle_reset = TRUE;
3882 			DHD_ERROR(("%s:  WLAN OFF Done\n", __FUNCTION__));
3883 
3884 		} else { /* Turn on WLAN */
3885 			if (bus->dhd->busstate == DHD_BUS_DOWN) {
3886 				/* Powering On */
3887 				DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
3888 #ifdef CONFIG_ARCH_MSM
3889 				while (--retry) {
3890 					bcmerror = dhdpcie_bus_clock_start(bus);
3891 					if (!bcmerror) {
3892 						DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
3893 							__FUNCTION__));
3894 						break;
3895 					} else {
3896 						OSL_SLEEP(10);
3897 					}
3898 				}
3899 
3900 				if (bcmerror && !retry) {
3901 					DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
3902 						__FUNCTION__, bcmerror));
3903 					goto done;
3904 				}
3905 #endif /* CONFIG_ARCH_MSM */
3906 				bus->is_linkdown = 0;
3907 #ifdef SUPPORT_LINKDOWN_RECOVERY
3908 				bus->read_shm_fail = FALSE;
3909 #endif /* SUPPORT_LINKDOWN_RECOVERY */
3910 				bcmerror = dhdpcie_bus_enable_device(bus);
3911 				if (bcmerror) {
3912 					DHD_ERROR(("%s: host configuration restore failed: %d\n",
3913 						__FUNCTION__, bcmerror));
3914 					goto done;
3915 				}
3916 
3917 				bcmerror = dhdpcie_bus_alloc_resource(bus);
3918 				if (bcmerror) {
3919 					DHD_ERROR(("%s: dhdpcie_bus_resource_alloc failed: %d\n",
3920 						__FUNCTION__, bcmerror));
3921 					goto done;
3922 				}
3923 
3924 				bcmerror = dhdpcie_bus_dongle_attach(bus);
3925 				if (bcmerror) {
3926 					DHD_ERROR(("%s: dhdpcie_bus_dongle_attach failed: %d\n",
3927 						__FUNCTION__, bcmerror));
3928 					goto done;
3929 				}
3930 
3931 				bcmerror = dhd_bus_request_irq(bus);
3932 				if (bcmerror) {
3933 					DHD_ERROR(("%s: dhd_bus_request_irq failed: %d\n",
3934 						__FUNCTION__, bcmerror));
3935 					goto done;
3936 				}
3937 
3938 				bus->dhd->dongle_reset = FALSE;
3939 
3940 				bcmerror = dhd_bus_start(dhdp);
3941 				if (bcmerror) {
3942 					DHD_ERROR(("%s: dhd_bus_start: %d\n",
3943 						__FUNCTION__, bcmerror));
3944 					goto done;
3945 				}
3946 
3947 				bus->dhd->up = TRUE;
3948 				DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
3949 			} else {
3950 				DHD_ERROR(("%s: what should we do here\n", __FUNCTION__));
3951 				goto done;
3952 			}
3953 		}
3954 	}
3955 
3956 done:
3957 	if (bcmerror) {
3958 		DHD_GENERAL_LOCK(bus->dhd, flags);
3959 		bus->dhd->busstate = DHD_BUS_DOWN;
3960 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
3961 	}
3962 
3963 	return bcmerror;
3964 }
3965 
3966 static int
dhdpcie_bus_doiovar(dhd_bus_t * bus,const bcm_iovar_t * vi,uint32 actionid,const char * name,void * params,int plen,void * arg,int len,int val_size)3967 dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
3968                 void *params, int plen, void *arg, int len, int val_size)
3969 {
3970 	int bcmerror = 0;
3971 	int32 int_val = 0;
3972 	int32 int_val2 = 0;
3973 	int32 int_val3 = 0;
3974 	bool bool_val = 0;
3975 
3976 	DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
3977 	           __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
3978 
3979 	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
3980 		goto exit;
3981 
3982 	if (plen >= (int)sizeof(int_val))
3983 		bcopy(params, &int_val, sizeof(int_val));
3984 
3985 	if (plen >= (int)sizeof(int_val) * 2)
3986 		bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
3987 
3988 	if (plen >= (int)sizeof(int_val) * 3)
3989 		bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3));
3990 
3991 	bool_val = (int_val != 0) ? TRUE : FALSE;
3992 
3993 	/* Check if dongle is in reset. If so, only allow DEVRESET iovars */
3994 	if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
3995 	                                actionid == IOV_GVAL(IOV_DEVRESET))) {
3996 		bcmerror = BCME_NOTREADY;
3997 		goto exit;
3998 	}
3999 
4000 	switch (actionid) {
4001 
4002 
4003 	case IOV_SVAL(IOV_VARS):
4004 		bcmerror = dhdpcie_downloadvars(bus, arg, len);
4005 		break;
4006 	case IOV_SVAL(IOV_PCIE_LPBK):
4007 		bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
4008 		break;
4009 
4010 	case IOV_SVAL(IOV_PCIE_DMAXFER): {
4011 		int int_val4 = 0;
4012 		if (plen >= (int)sizeof(int_val) * 4) {
4013 			bcopy((void*)((uintptr)params + 3 * sizeof(int_val)),
4014 				&int_val4, sizeof(int_val4));
4015 		}
4016 		bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3, int_val4);
4017 		break;
4018 	}
4019 
4020 #ifdef DEVICE_TX_STUCK_DETECT
4021 	case IOV_GVAL(IOV_DEVICE_TX_STUCK_DETECT):
4022 		int_val = bus->dev_tx_stuck_monitor;
4023 		bcopy(&int_val, arg, val_size);
4024 		break;
4025 	case IOV_SVAL(IOV_DEVICE_TX_STUCK_DETECT):
4026 		bus->dev_tx_stuck_monitor = (bool)int_val;
4027 		break;
4028 #endif /* DEVICE_TX_STUCK_DETECT */
4029 	case IOV_GVAL(IOV_PCIE_SUSPEND):
4030 		int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
4031 		bcopy(&int_val, arg, val_size);
4032 		break;
4033 
4034 	case IOV_SVAL(IOV_PCIE_SUSPEND):
4035 		if (bool_val) { /* Suspend */
4036 			int ret;
4037 			unsigned long flags;
4038 
4039 			/*
4040 			 * If some other context is busy, wait until they are done,
4041 			 * before starting suspend
4042 			 */
4043 			ret = dhd_os_busbusy_wait_condition(bus->dhd,
4044 				&bus->dhd->dhd_bus_busy_state, DHD_BUS_BUSY_IN_DHD_IOVAR);
4045 			if (ret == 0) {
4046 				DHD_ERROR(("%s:Wait Timedout, dhd_bus_busy_state = 0x%x\n",
4047 					__FUNCTION__, bus->dhd->dhd_bus_busy_state));
4048 				return BCME_BUSY;
4049 			}
4050 
4051 			DHD_GENERAL_LOCK(bus->dhd, flags);
4052 			DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
4053 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
4054 
4055 			dhdpcie_bus_suspend(bus, TRUE);
4056 
4057 			DHD_GENERAL_LOCK(bus->dhd, flags);
4058 			DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
4059 			dhd_os_busbusy_wake(bus->dhd);
4060 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
4061 		} else { /* Resume */
4062 			unsigned long flags;
4063 			DHD_GENERAL_LOCK(bus->dhd, flags);
4064 			DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
4065 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
4066 
4067 			dhdpcie_bus_suspend(bus, FALSE);
4068 
4069 			DHD_GENERAL_LOCK(bus->dhd, flags);
4070 			DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
4071 			dhd_os_busbusy_wake(bus->dhd);
4072 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
4073 		}
4074 		break;
4075 
4076 	case IOV_GVAL(IOV_MEMSIZE):
4077 		int_val = (int32)bus->ramsize;
4078 		bcopy(&int_val, arg, val_size);
4079 		break;
4080 
4081 #ifdef BCM_BUZZZ
4082 	/* Dump dongle side buzzz trace to console */
4083 	case IOV_GVAL(IOV_BUZZZ_DUMP):
4084 		bcmerror = dhd_buzzz_dump_dngl(bus);
4085 		break;
4086 #endif /* BCM_BUZZZ */
4087 
4088 	case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
4089 		bcmerror = dhdpcie_bus_download_state(bus, bool_val);
4090 		break;
4091 
4092 	case IOV_GVAL(IOV_RAMSIZE):
4093 		int_val = (int32)bus->ramsize;
4094 		bcopy(&int_val, arg, val_size);
4095 		break;
4096 
4097 	case IOV_SVAL(IOV_RAMSIZE):
4098 		bus->ramsize = int_val;
4099 		bus->orig_ramsize = int_val;
4100 		break;
4101 
4102 	case IOV_GVAL(IOV_RAMSTART):
4103 		int_val = (int32)bus->dongle_ram_base;
4104 		bcopy(&int_val, arg, val_size);
4105 		break;
4106 
4107 	case IOV_GVAL(IOV_CC_NVMSHADOW):
4108 	{
4109 		struct bcmstrbuf dump_b;
4110 
4111 		bcm_binit(&dump_b, arg, len);
4112 		bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);
4113 		break;
4114 	}
4115 
4116 	case IOV_GVAL(IOV_SLEEP_ALLOWED):
4117 		bool_val = bus->sleep_allowed;
4118 		bcopy(&bool_val, arg, val_size);
4119 		break;
4120 
4121 	case IOV_SVAL(IOV_SLEEP_ALLOWED):
4122 		bus->sleep_allowed = bool_val;
4123 		break;
4124 
4125 	case IOV_GVAL(IOV_DONGLEISOLATION):
4126 		int_val = bus->dhd->dongle_isolation;
4127 		bcopy(&int_val, arg, val_size);
4128 		break;
4129 
4130 	case IOV_SVAL(IOV_DONGLEISOLATION):
4131 		bus->dhd->dongle_isolation = bool_val;
4132 		break;
4133 
4134 	case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
4135 		int_val = bus->ltrsleep_on_unload;
4136 		bcopy(&int_val, arg, val_size);
4137 		break;
4138 
4139 	case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
4140 		bus->ltrsleep_on_unload = bool_val;
4141 		break;
4142 
4143 	case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK):
4144 	{
4145 		struct bcmstrbuf dump_b;
4146 		bcm_binit(&dump_b, arg, len);
4147 		bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);
4148 		break;
4149 	}
4150 	case IOV_GVAL(IOV_DMA_RINGINDICES):
4151 	{	int h2d_support, d2h_support;
4152 
4153 		d2h_support = bus->dhd->dma_d2h_ring_upd_support ? 1 : 0;
4154 		h2d_support = bus->dhd->dma_h2d_ring_upd_support ? 1 : 0;
4155 		int_val = d2h_support | (h2d_support << 1);
4156 		bcopy(&int_val, arg, sizeof(int_val));
4157 		break;
4158 	}
4159 	case IOV_SVAL(IOV_DMA_RINGINDICES):
4160 		/* Can change it only during initialization/FW download */
4161 		if (bus->dhd->busstate == DHD_BUS_DOWN) {
4162 			if ((int_val > 3) || (int_val < 0)) {
4163 				DHD_ERROR(("%s: Bad argument. Possible values: 0, 1, 2 & 3\n", __FUNCTION__));
4164 				bcmerror = BCME_BADARG;
4165 			} else {
4166 				bus->dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
4167 				bus->dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
4168 				bus->dhd->dma_ring_upd_overwrite = TRUE;
4169 			}
4170 		} else {
4171 			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
4172 				__FUNCTION__));
4173 			bcmerror = BCME_NOTDOWN;
4174 		}
4175 		break;
4176 
4177 	case IOV_GVAL(IOV_METADATA_DBG):
4178 		int_val = dhd_prot_metadata_dbg_get(bus->dhd);
4179 		bcopy(&int_val, arg, val_size);
4180 		break;
4181 	case IOV_SVAL(IOV_METADATA_DBG):
4182 		dhd_prot_metadata_dbg_set(bus->dhd, (int_val != 0));
4183 		break;
4184 
4185 	case IOV_GVAL(IOV_RX_METADATALEN):
4186 		int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
4187 		bcopy(&int_val, arg, val_size);
4188 		break;
4189 
4190 	case IOV_SVAL(IOV_RX_METADATALEN):
4191 		if (int_val > 64) {
4192 			bcmerror = BCME_BUFTOOLONG;
4193 			break;
4194 		}
4195 		dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);
4196 		break;
4197 
4198 	case IOV_SVAL(IOV_TXP_THRESHOLD):
4199 		dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);
4200 		break;
4201 
4202 	case IOV_GVAL(IOV_TXP_THRESHOLD):
4203 		int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
4204 		bcopy(&int_val, arg, val_size);
4205 		break;
4206 
4207 	case IOV_SVAL(IOV_DB1_FOR_MB):
4208 		if (int_val)
4209 			bus->db1_for_mb = TRUE;
4210 		else
4211 			bus->db1_for_mb = FALSE;
4212 		break;
4213 
4214 	case IOV_GVAL(IOV_DB1_FOR_MB):
4215 		if (bus->db1_for_mb)
4216 			int_val = 1;
4217 		else
4218 			int_val = 0;
4219 		bcopy(&int_val, arg, val_size);
4220 		break;
4221 
4222 	case IOV_GVAL(IOV_TX_METADATALEN):
4223 		int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
4224 		bcopy(&int_val, arg, val_size);
4225 		break;
4226 
4227 	case IOV_SVAL(IOV_TX_METADATALEN):
4228 		if (int_val > 64) {
4229 			bcmerror = BCME_BUFTOOLONG;
4230 			break;
4231 		}
4232 		dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
4233 		break;
4234 
4235 	case IOV_SVAL(IOV_DEVRESET):
4236 		dhd_bus_devreset(bus->dhd, (uint8)bool_val);
4237 		break;
4238 	case IOV_SVAL(IOV_FORCE_FW_TRAP):
4239 		if (bus->dhd->busstate == DHD_BUS_DATA)
4240 			dhdpcie_fw_trap(bus);
4241 		else {
4242 			DHD_ERROR(("%s: Bus is NOT up\n", __FUNCTION__));
4243 			bcmerror = BCME_NOTUP;
4244 		}
4245 		break;
4246 	case IOV_GVAL(IOV_FLOW_PRIO_MAP):
4247 		int_val = bus->dhd->flow_prio_map_type;
4248 		bcopy(&int_val, arg, val_size);
4249 		break;
4250 
4251 	case IOV_SVAL(IOV_FLOW_PRIO_MAP):
4252 		int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
4253 		bcopy(&int_val, arg, val_size);
4254 		break;
4255 
4256 #ifdef DHD_PCIE_RUNTIMEPM
4257 	case IOV_GVAL(IOV_IDLETIME):
4258 		int_val = bus->idletime;
4259 		bcopy(&int_val, arg, val_size);
4260 		break;
4261 
4262 	case IOV_SVAL(IOV_IDLETIME):
4263 		if (int_val < 0) {
4264 			bcmerror = BCME_BADARG;
4265 		} else {
4266 			bus->idletime = int_val;
4267 			if (bus->idletime) {
4268 				DHD_ENABLE_RUNTIME_PM(bus->dhd);
4269 			} else {
4270 				DHD_DISABLE_RUNTIME_PM(bus->dhd);
4271 			}
4272 		}
4273 		break;
4274 #endif /* DHD_PCIE_RUNTIMEPM */
4275 
4276 	case IOV_GVAL(IOV_TXBOUND):
4277 		int_val = (int32)dhd_txbound;
4278 		bcopy(&int_val, arg, val_size);
4279 		break;
4280 
4281 	case IOV_SVAL(IOV_TXBOUND):
4282 		dhd_txbound = (uint)int_val;
4283 		break;
4284 
4285 	case IOV_SVAL(IOV_H2D_MAILBOXDATA):
4286 		dhdpcie_send_mb_data(bus, (uint)int_val);
4287 		break;
4288 
4289 	case IOV_SVAL(IOV_INFORINGS):
4290 		dhd_prot_init_info_rings(bus->dhd);
4291 		break;
4292 
4293 	case IOV_SVAL(IOV_H2D_PHASE):
4294 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
4295 			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
4296 				__FUNCTION__));
4297 			bcmerror = BCME_NOTDOWN;
4298 			break;
4299 		}
4300 		if (int_val)
4301 			bus->dhd->h2d_phase_supported = TRUE;
4302 		else
4303 			bus->dhd->h2d_phase_supported = FALSE;
4304 		break;
4305 
4306 	case IOV_GVAL(IOV_H2D_PHASE):
4307 		int_val = (int32) bus->dhd->h2d_phase_supported;
4308 		bcopy(&int_val, arg, val_size);
4309 		break;
4310 
4311 	case IOV_SVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
4312 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
4313 			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
4314 				__FUNCTION__));
4315 			bcmerror = BCME_NOTDOWN;
4316 			break;
4317 		}
4318 		if (int_val)
4319 			bus->dhd->force_dongletrap_on_bad_h2d_phase = TRUE;
4320 		else
4321 			bus->dhd->force_dongletrap_on_bad_h2d_phase = FALSE;
4322 		break;
4323 
4324 	case IOV_GVAL(IOV_H2D_ENABLE_TRAP_BADPHASE):
4325 		int_val = (int32) bus->dhd->force_dongletrap_on_bad_h2d_phase;
4326 		bcopy(&int_val, arg, val_size);
4327 		break;
4328 
4329 	case IOV_SVAL(IOV_H2D_TXPOST_MAX_ITEM):
4330 		if (bus->dhd->busstate != DHD_BUS_DOWN) {
4331 			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
4332 				__FUNCTION__));
4333 			bcmerror = BCME_NOTDOWN;
4334 			break;
4335 		}
4336 		dhd_prot_set_h2d_max_txpost(bus->dhd, (uint16)int_val);
4337 		break;
4338 
4339 	case IOV_GVAL(IOV_H2D_TXPOST_MAX_ITEM):
4340 		int_val = dhd_prot_get_h2d_max_txpost(bus->dhd);
4341 		bcopy(&int_val, arg, val_size);
4342 		break;
4343 
4344 	case IOV_GVAL(IOV_RXBOUND):
4345 		int_val = (int32)dhd_rxbound;
4346 		bcopy(&int_val, arg, val_size);
4347 		break;
4348 
4349 	case IOV_SVAL(IOV_RXBOUND):
4350 		dhd_rxbound = (uint)int_val;
4351 		break;
4352 
4353 	case IOV_GVAL(IOV_TRAPDATA):
4354 	{
4355 		struct bcmstrbuf dump_b;
4356 		bcm_binit(&dump_b, arg, len);
4357 		bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, FALSE);
4358 		break;
4359 	}
4360 
4361 	case IOV_GVAL(IOV_TRAPDATA_RAW):
4362 	{
4363 		struct bcmstrbuf dump_b;
4364 		bcm_binit(&dump_b, arg, len);
4365 		bcmerror = dhd_prot_dump_extended_trap(bus->dhd, &dump_b, TRUE);
4366 		break;
4367 	}
4368 	case IOV_SVAL(IOV_HANGREPORT):
4369 		bus->dhd->hang_report = bool_val;
4370 		DHD_ERROR(("%s: Set hang_report as %d\n",
4371 			__FUNCTION__, bus->dhd->hang_report));
4372 		break;
4373 
4374 	case IOV_GVAL(IOV_HANGREPORT):
4375 		int_val = (int32)bus->dhd->hang_report;
4376 		bcopy(&int_val, arg, val_size);
4377 		break;
4378 
4379 	case IOV_SVAL(IOV_CTO_PREVENTION):
4380 		{
4381 			uint32 pcie_lnkst;
4382 
4383 			if (bus->sih->buscorerev < 19) {
4384 				bcmerror = BCME_UNSUPPORTED;
4385 				break;
4386 			}
4387 			si_corereg(bus->sih, bus->sih->buscoreidx,
4388 					OFFSETOF(sbpcieregs_t, configaddr), ~0, PCI_LINK_STATUS);
4389 
4390 			pcie_lnkst = si_corereg(bus->sih, bus->sih->buscoreidx,
4391 				OFFSETOF(sbpcieregs_t, configdata), 0, 0);
4392 
4393 			/* 4347A0 in PCIEGEN1 doesn't support CTO prevention due to
4394 			 * 4347A0 DAR Issue : JIRA:CRWLPCIEGEN2-443: Issue in DAR write
4395 			 */
4396 			if ((bus->sih->buscorerev == 19) &&
4397 				(((pcie_lnkst >> PCI_LINK_SPEED_SHIFT) &
4398 					PCI_LINK_SPEED_MASK) == PCIE_LNK_SPEED_GEN1)) {
4399 				bcmerror = BCME_UNSUPPORTED;
4400 				break;
4401 			}
4402 			bus->dhd->cto_enable = bool_val;
4403 			dhdpcie_cto_init(bus, bus->dhd->cto_enable);
4404 			DHD_ERROR(("%s: set CTO prevention and recovery enable/disable %d\n",
4405 				__FUNCTION__, bus->dhd->cto_enable));
4406 		}
4407 		break;
4408 
4409 	case IOV_GVAL(IOV_CTO_PREVENTION):
4410 		if (bus->sih->buscorerev < 19) {
4411 			bcmerror = BCME_UNSUPPORTED;
4412 			break;
4413 		}
4414 		int_val = (int32)bus->dhd->cto_enable;
4415 		bcopy(&int_val, arg, val_size);
4416 		break;
4417 
4418 	case IOV_SVAL(IOV_CTO_THRESHOLD):
4419 		{
4420 			if (bus->sih->buscorerev < 19) {
4421 				bcmerror = BCME_UNSUPPORTED;
4422 				break;
4423 			}
4424 			bus->dhd->cto_threshold = (uint32)int_val;
4425 		}
4426 		break;
4427 
4428 	case IOV_GVAL(IOV_CTO_THRESHOLD):
4429 		if (bus->sih->buscorerev < 19) {
4430 			bcmerror = BCME_UNSUPPORTED;
4431 			break;
4432 		}
4433 		if (bus->dhd->cto_threshold)
4434 			int_val = (int32)bus->dhd->cto_threshold;
4435 		else
4436 			int_val = (int32)PCIE_CTO_TO_THRESH_DEFAULT;
4437 
4438 		bcopy(&int_val, arg, val_size);
4439 		break;
4440 
4441 	case IOV_SVAL(IOV_PCIE_WD_RESET):
4442 		if (bool_val) {
4443 			pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *) bus->regs);
4444 		}
4445 		break;
4446 #ifdef DHD_EFI
4447 	case IOV_SVAL(IOV_CONTROL_SIGNAL):
4448 		{
4449 			bcmerror = dhd_control_signal(bus, arg, TRUE);
4450 			break;
4451 		}
4452 
4453 	case IOV_GVAL(IOV_CONTROL_SIGNAL):
4454 		{
4455 			bcmerror = dhd_control_signal(bus, params, FALSE);
4456 			break;
4457 		}
4458 #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
4459 	case IOV_GVAL(IOV_DEEP_SLEEP):
4460 		int_val = bus->ds_enabled;
4461 		bcopy(&int_val, arg, val_size);
4462 		break;
4463 
4464 	case IOV_SVAL(IOV_DEEP_SLEEP):
4465 		if (int_val == 1) {
4466 			bus->ds_enabled = TRUE;
4467 			/* Deassert */
4468 			if (dhd_bus_set_device_wake(bus, FALSE) == BCME_OK) {
4469 #ifdef PCIE_INB_DW
4470 				int timeleft;
4471 				timeleft = dhd_os_ds_enter_wait(bus->dhd, NULL);
4472 				if (timeleft == 0) {
4473 					DHD_ERROR(("DS-ENTER timeout\n"));
4474 					bus->ds_enabled = FALSE;
4475 					break;
4476 				}
4477 #endif /* PCIE_INB_DW */
4478 			}
4479 			else {
4480 				DHD_ERROR(("%s: Enable Deep Sleep failed !\n", __FUNCTION__));
4481 				bus->ds_enabled = FALSE;
4482 			}
4483 		}
4484 		else if (int_val == 0) {
4485 			/* Assert */
4486 			if (dhd_bus_set_device_wake(bus, TRUE) == BCME_OK)
4487 				bus->ds_enabled = FALSE;
4488 			else
4489 				DHD_ERROR(("%s: Disable Deep Sleep failed !\n", __FUNCTION__));
4490 		}
4491 		else
4492 			DHD_ERROR(("%s: Invalid number, allowed only 0|1\n", __FUNCTION__));
4493 
4494 		break;
4495 #endif /* PCIE_OOB || PCIE_INB_DW */
4496 
4497 	case IOV_GVAL(IOV_WIFI_PROPERTIES):
4498 		bcmerror = dhd_wifi_properties(bus, params);
4499 		break;
4500 
4501 	case IOV_GVAL(IOV_OTP_DUMP):
4502 		bcmerror = dhd_otp_dump(bus, params);
4503 		break;
4504 #endif /* DHD_EFI */
4505 
4506 	case IOV_GVAL(IOV_IDMA_ENABLE):
4507 		int_val = bus->idma_enabled;
4508 		bcopy(&int_val, arg, val_size);
4509 		break;
4510 	case IOV_SVAL(IOV_IDMA_ENABLE):
4511 		bus->idma_enabled = (bool)int_val;
4512 		break;
4513 	case IOV_GVAL(IOV_IFRM_ENABLE):
4514 		int_val = bus->ifrm_enabled;
4515 		bcopy(&int_val, arg, val_size);
4516 		break;
4517 	case IOV_SVAL(IOV_IFRM_ENABLE):
4518 		bus->ifrm_enabled = (bool)int_val;
4519 		break;
4520 	case IOV_GVAL(IOV_CLEAR_RING):
4521 		bcopy(&int_val, arg, val_size);
4522 		dhd_flow_rings_flush(bus->dhd, 0);
4523 		break;
4524 	default:
4525 		bcmerror = BCME_UNSUPPORTED;
4526 		break;
4527 	}
4528 
4529 exit:
4530 	return bcmerror;
4531 } /* dhdpcie_bus_doiovar */
4532 
4533 /** Transfers bytes from host to dongle using pio mode */
4534 static int
dhdpcie_bus_lpback_req(struct dhd_bus * bus,uint32 len)4535 dhdpcie_bus_lpback_req(struct  dhd_bus *bus, uint32 len)
4536 {
4537 	if (bus->dhd == NULL) {
4538 		DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
4539 		return 0;
4540 	}
4541 	if (bus->dhd->prot == NULL) {
4542 		DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
4543 		return 0;
4544 	}
4545 	if (bus->dhd->busstate != DHD_BUS_DATA) {
4546 		DHD_ERROR(("%s: not in a readystate to LPBK  is not inited\n", __FUNCTION__));
4547 		return 0;
4548 	}
4549 	dhdmsgbuf_lpbk_req(bus->dhd, len);
4550 	return 0;
4551 }
4552 
4553 /* Ring DoorBell1 to indicate Hostready i.e. D3 Exit */
4554 void
dhd_bus_hostready(struct dhd_bus * bus)4555 dhd_bus_hostready(struct  dhd_bus *bus)
4556 {
4557 	if (!bus->dhd->d2h_hostrdy_supported) {
4558 		return;
4559 	}
4560 
4561 	if (bus->is_linkdown) {
4562 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
4563 		return;
4564 	}
4565 
4566 	DHD_INFO_HW4(("%s : Read PCICMD Reg: 0x%08X\n", __FUNCTION__,
4567 		dhd_pcie_config_read(bus->osh, PCI_CFG_CMD, sizeof(uint32))));
4568 	si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678);
4569 	bus->hostready_count ++;
4570 	DHD_INFO_HW4(("%s: Ring Hostready:%d\n", __FUNCTION__, bus->hostready_count));
4571 }
4572 
4573 /* Clear INTSTATUS */
4574 void
dhdpcie_bus_clear_intstatus(struct dhd_bus * bus)4575 dhdpcie_bus_clear_intstatus(struct dhd_bus *bus)
4576 {
4577 	uint32 intstatus = 0;
4578 	if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
4579 		(bus->sih->buscorerev == 2)) {
4580 		intstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
4581 		dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, intstatus);
4582 	} else {
4583 		/* this is a PCIE core register..not a config register... */
4584 		intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
4585 		si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, bus->def_intmask,
4586 			intstatus);
4587 	}
4588 }
4589 
4590 int
dhdpcie_bus_suspend(struct dhd_bus * bus,bool state)4591 dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
4592 {
4593 	int timeleft;
4594 	int rc = 0;
4595 	unsigned long flags;
4596 
4597 	printf("%s: state=%d\n", __FUNCTION__, state);
4598 	if (bus->dhd == NULL) {
4599 		DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
4600 		return BCME_ERROR;
4601 	}
4602 	if (bus->dhd->prot == NULL) {
4603 		DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
4604 		return BCME_ERROR;
4605 	}
4606 
4607 	if (dhd_query_bus_erros(bus->dhd)) {
4608 		return BCME_ERROR;
4609 	}
4610 
4611 	DHD_GENERAL_LOCK(bus->dhd, flags);
4612 	if (!(bus->dhd->busstate == DHD_BUS_DATA || bus->dhd->busstate == DHD_BUS_SUSPEND)) {
4613 		DHD_ERROR(("%s: not in a readystate\n", __FUNCTION__));
4614 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
4615 		return BCME_ERROR;
4616 	}
4617 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
4618 	if (bus->dhd->dongle_reset) {
4619 		DHD_ERROR(("Dongle is in reset state.\n"));
4620 		return -EIO;
4621 	}
4622 
4623 	/* Check whether we are already in the requested state.
4624 	 * state=TRUE means Suspend
4625 	 * state=FALSE meanse Resume
4626 	 */
4627 	if (state == TRUE && bus->dhd->busstate == DHD_BUS_SUSPEND) {
4628 		DHD_ERROR(("Bus is already in SUSPEND state.\n"));
4629 		return BCME_OK;
4630 	} else if (state == FALSE && bus->dhd->busstate == DHD_BUS_DATA) {
4631 		DHD_ERROR(("Bus is already in RESUME state.\n"));
4632 		return BCME_OK;
4633 	}
4634 
4635 	if (bus->d3_suspend_pending) {
4636 		DHD_ERROR(("Suspend pending ...\n"));
4637 		return BCME_ERROR;
4638 	}
4639 
4640 
4641 	if (state) {
4642 		int idle_retry = 0;
4643 		int active;
4644 
4645 		if (bus->is_linkdown) {
4646 			DHD_ERROR(("%s: PCIe link was down, state=%d\n",
4647 				__FUNCTION__, state));
4648 			return BCME_ERROR;
4649 		}
4650 
4651 		/* Suspend */
4652 		DHD_ERROR(("%s: Entering suspend state\n", __FUNCTION__));
4653 
4654 		DHD_GENERAL_LOCK(bus->dhd, flags);
4655 		if (DHD_BUS_BUSY_CHECK_IN_TX(bus->dhd)) {
4656 			DHD_ERROR(("Tx Request is not ended\n"));
4657 			bus->dhd->busstate = DHD_BUS_DATA;
4658 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
4659 #ifndef DHD_EFI
4660 			return -EBUSY;
4661 #else
4662 			return BCME_ERROR;
4663 #endif
4664 		}
4665 
4666 		/* stop all interface network queue. */
4667 		dhd_bus_stop_queue(bus);
4668 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
4669 
4670 		DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
4671 #ifdef DHD_TIMESYNC
4672 		/* disable time sync mechanism, if configed */
4673 		dhd_timesync_control(bus->dhd, TRUE);
4674 #endif /* DHD_TIMESYNC */
4675 #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
4676 		dhd_bus_set_device_wake(bus, TRUE);
4677 #endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
4678 #ifdef PCIE_OOB
4679 		bus->oob_presuspend = TRUE;
4680 #endif
4681 #ifdef PCIE_INB_DW
4682 		/* De-assert at this point for In-band device_wake */
4683 		if (INBAND_DW_ENAB(bus)) {
4684 			dhd_bus_set_device_wake(bus, FALSE);
4685 			dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_HOST_SLEEP_WAIT);
4686 		}
4687 #endif /* PCIE_INB_DW */
4688 
4689 		/* Clear wait_for_d3_ack */
4690 		bus->wait_for_d3_ack = 0;
4691 		/*
4692 		 * Send H2D_HOST_D3_INFORM to dongle and mark
4693 		 * bus->d3_suspend_pending to TRUE in dhdpcie_send_mb_data
4694 		 * inside atomic context, so that no more DBs will be
4695 		 * rung after sending D3_INFORM
4696 		 */
4697 		dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
4698 
4699 		/* Wait for D3 ACK for D3_ACK_RESP_TIMEOUT seconds */
4700 		dhd_os_set_ioctl_resp_timeout(D3_ACK_RESP_TIMEOUT);
4701 		timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
4702 
4703 #ifdef DHD_RECOVER_TIMEOUT
4704 		if (bus->wait_for_d3_ack == 0) {
4705 			/* If wait_for_d3_ack was not updated because D2H MB was not received */
4706 			uint32 intstatus = 0;
4707 			uint32 intmask = 0;
4708 			intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
4709 			intmask = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
4710 			if ((intstatus) && (!intmask) && (timeleft == 0) &&
4711 				(!dhd_query_bus_erros(bus->dhd))) {
4712 
4713 				DHD_ERROR(("%s: D3 ACK trying again intstatus=%x intmask=%x\n",
4714 					__FUNCTION__, intstatus, intmask));
4715 				DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters\r\n"));
4716 				DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_en_count=%lu\n"
4717 					"isr_intr_disable_count=%lu suspend_intr_dis_count=%lu\n"
4718 					"dpc_return_busdown_count=%lu\n",
4719 					bus->resume_intr_enable_count, bus->dpc_intr_enable_count,
4720 					bus->isr_intr_disable_count,
4721 					bus->suspend_intr_disable_count,
4722 					bus->dpc_return_busdown_count));
4723 
4724 				dhd_prot_process_ctrlbuf(bus->dhd);
4725 
4726 				timeleft = dhd_os_d3ack_wait(bus->dhd, &bus->wait_for_d3_ack);
4727 
4728 				/* Enable Back Interrupts using IntMask  */
4729 				dhdpcie_bus_intr_enable(bus);
4730 			}
4731 
4732 
4733 		} /* bus->wait_for_d3_ack was 0 */
4734 #endif /* DHD_RECOVER_TIMEOUT */
4735 
4736 		DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
4737 
4738 		/* To allow threads that got pre-empted to complete.
4739 		 */
4740 		while ((active = dhd_os_check_wakelock_all(bus->dhd)) &&
4741 			(idle_retry < MAX_WKLK_IDLE_CHECK)) {
4742 			OSL_SLEEP(1);
4743 			idle_retry++;
4744 		}
4745 
4746 		if (bus->wait_for_d3_ack) {
4747 			DHD_ERROR(("%s: Got D3 Ack \n", __FUNCTION__));
4748 
4749 			/* Got D3 Ack. Suspend the bus */
4750 			if (active) {
4751 				DHD_ERROR(("%s():Suspend failed because of wakelock"
4752 					"restoring Dongle to D0\n", __FUNCTION__));
4753 
4754 				/*
4755 				 * Dongle still thinks that it has to be in D3 state until
4756 				 * it gets a D0 Inform, but we are backing off from suspend.
4757 				 * Ensure that Dongle is brought back to D0.
4758 				 *
4759 				 * Bringing back Dongle from D3 Ack state to D0 state is a
4760 				 * 2 step process. Dongle would want to know that D0 Inform
4761 				 * would be sent as a MB interrupt to bring it out of D3 Ack
4762 				 * state to D0 state. So we have to send both this message.
4763 				 */
4764 
4765 				/* Clear wait_for_d3_ack to send D0_INFORM or host_ready */
4766 				bus->wait_for_d3_ack = 0;
4767 
4768 				/* Enable back the intmask which was cleared in DPC
4769 				 * after getting D3_ACK.
4770 				 */
4771 				bus->resume_intr_enable_count++;
4772 				dhdpcie_bus_intr_enable(bus);
4773 
4774 				if (bus->use_d0_inform) {
4775 					DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
4776 					dhdpcie_send_mb_data(bus,
4777 						(H2D_HOST_D0_INFORM_IN_USE | H2D_HOST_D0_INFORM));
4778 					DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
4779 				}
4780 				/* ring doorbell 1 (hostready) */
4781 				dhd_bus_hostready(bus);
4782 
4783 				DHD_GENERAL_LOCK(bus->dhd, flags);
4784 				bus->d3_suspend_pending = FALSE;
4785 				bus->dhd->busstate = DHD_BUS_DATA;
4786 				/* resume all interface network queue. */
4787 				dhd_bus_start_queue(bus);
4788 				DHD_GENERAL_UNLOCK(bus->dhd, flags);
4789 				rc = BCME_ERROR;
4790 			} else {
4791 #ifdef PCIE_OOB
4792 				bus->oob_presuspend = FALSE;
4793 				if (OOB_DW_ENAB(bus)) {
4794 					dhd_bus_set_device_wake(bus, FALSE);
4795 				}
4796 #endif /* PCIE_OOB */
4797 #if defined(PCIE_OOB) || defined(BCMPCIE_OOB_HOST_WAKE)
4798 				bus->oob_presuspend = TRUE;
4799 #endif /* PCIE_OOB || BCMPCIE_OOB_HOST_WAKE */
4800 #ifdef PCIE_INB_DW
4801 				if (INBAND_DW_ENAB(bus)) {
4802 					if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
4803 						DW_DEVICE_HOST_SLEEP_WAIT) {
4804 						dhdpcie_bus_set_pcie_inband_dw_state(bus,
4805 							DW_DEVICE_HOST_SLEEP);
4806 					}
4807 				}
4808 #endif /* PCIE_INB_DW */
4809 				if (bus->use_d0_inform &&
4810 					(bus->api.fw_rev < PCIE_SHARED_VERSION_6)) {
4811 					DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
4812 					dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM_IN_USE));
4813 					DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
4814 				}
4815 #if defined(BCMPCIE_OOB_HOST_WAKE)
4816 				dhdpcie_oob_intr_set(bus, TRUE);
4817 #endif /* BCMPCIE_OOB_HOST_WAKE */
4818 
4819 				DHD_GENERAL_LOCK(bus->dhd, flags);
4820 				/* The Host cannot process interrupts now so disable the same.
4821 				 * No need to disable the dongle INTR using intmask, as we are
4822 				 * already calling dhdpcie_bus_intr_disable from DPC context after
4823 				 * getting D3_ACK. Code may not look symmetric between Suspend and
4824 				 * Resume paths but this is done to close down the timing window
4825 				 * between DPC and suspend context.
4826 				 */
4827 				/* Disable interrupt from host side!! */
4828 				dhdpcie_disable_irq_nosync(bus);
4829 
4830 				bus->dhd->d3ackcnt_timeout = 0;
4831 				bus->d3_suspend_pending = FALSE;
4832 				bus->dhd->busstate = DHD_BUS_SUSPEND;
4833 				DHD_GENERAL_UNLOCK(bus->dhd, flags);
4834 				/* Handle Host Suspend */
4835 				rc = dhdpcie_pci_suspend_resume(bus, state);
4836 			}
4837 		} else if (timeleft == 0) {
4838 			bus->dhd->d3ack_timeout_occured = TRUE;
4839 			/* If the D3 Ack has timeout */
4840 			bus->dhd->d3ackcnt_timeout++;
4841 			DHD_ERROR(("%s: resumed on timeout for D3 ACK d3_inform_cnt %d \n",
4842 					__FUNCTION__, bus->dhd->d3ackcnt_timeout));
4843 			DHD_GENERAL_LOCK(bus->dhd, flags);
4844 			bus->d3_suspend_pending = FALSE;
4845 			bus->dhd->busstate = DHD_BUS_DATA;
4846 			/* resume all interface network queue. */
4847 			dhd_bus_start_queue(bus);
4848 			DHD_GENERAL_UNLOCK(bus->dhd, flags);
4849 			if (!bus->dhd->dongle_trap_occured) {
4850 				uint32 intstatus = 0;
4851 
4852 				/* Check if PCIe bus status is valid */
4853 				intstatus = si_corereg(bus->sih,
4854 					bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
4855 				if (intstatus == (uint32)-1) {
4856 					/* Invalidate PCIe bus status */
4857 					bus->is_linkdown = 1;
4858 				}
4859 
4860 				dhd_bus_dump_console_buffer(bus);
4861 				dhd_prot_debug_info_print(bus->dhd);
4862 #ifdef DHD_FW_COREDUMP
4863 				if (bus->dhd->memdump_enabled) {
4864 					/* write core dump to file */
4865 					bus->dhd->memdump_type = DUMP_TYPE_D3_ACK_TIMEOUT;
4866 					dhdpcie_mem_dump(bus);
4867 				}
4868 #endif /* DHD_FW_COREDUMP */
4869 				DHD_ERROR(("%s: Event HANG send up due to D3_ACK timeout\n",
4870 					__FUNCTION__));
4871 #ifdef SUPPORT_LINKDOWN_RECOVERY
4872 #ifdef CONFIG_ARCH_MSM
4873 				bus->no_cfg_restore = 1;
4874 #endif /* CONFIG_ARCH_MSM */
4875 #endif /* SUPPORT_LINKDOWN_RECOVERY */
4876 				dhd_os_check_hang(bus->dhd, 0, -ETIMEDOUT);
4877 			}
4878 			rc = -ETIMEDOUT;
4879 		}
4880 		bus->wait_for_d3_ack = 1;
4881 
4882 #ifdef PCIE_OOB
4883 		bus->oob_presuspend = FALSE;
4884 #endif /* PCIE_OOB */
4885 	} else {
4886 		/* Resume */
4887 		/**
4888 		 * PCIE2_BAR0_CORE2_WIN gets reset after D3 cold.
4889 		 * si_backplane_access(function to read/write backplane)
4890 		 * updates the window(PCIE2_BAR0_CORE2_WIN) only if
4891 		 * window being accessed is different form the window
4892 		 * being pointed by second_bar0win.
4893 		 * Since PCIE2_BAR0_CORE2_WIN is already reset because of D3 cold,
4894 		 * invalidating second_bar0win after resume updates
4895 		 * PCIE2_BAR0_CORE2_WIN with right window.
4896 		 */
4897 		si_invalidate_second_bar0win(bus->sih);
4898 #if defined(BCMPCIE_OOB_HOST_WAKE)
4899 		DHD_OS_OOB_IRQ_WAKE_UNLOCK(bus->dhd);
4900 #endif /* BCMPCIE_OOB_HOST_WAKE */
4901 #ifdef PCIE_INB_DW
4902 		if (INBAND_DW_ENAB(bus)) {
4903 			if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == DW_DEVICE_HOST_SLEEP) {
4904 				dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_HOST_WAKE_WAIT);
4905 			}
4906 		}
4907 #endif /* PCIE_INB_DW */
4908 		rc = dhdpcie_pci_suspend_resume(bus, state);
4909 
4910 #ifdef BCMPCIE_OOB_HOST_WAKE
4911 		bus->oob_presuspend = FALSE;
4912 #endif /* BCMPCIE_OOB_HOST_WAKE */
4913 
4914 		if (bus->dhd->busstate == DHD_BUS_SUSPEND) {
4915 			if (bus->use_d0_inform) {
4916 				DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
4917 				dhdpcie_send_mb_data(bus, (H2D_HOST_D0_INFORM));
4918 				DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
4919 			}
4920 			/* ring doorbell 1 (hostready) */
4921 			dhd_bus_hostready(bus);
4922 		}
4923 
4924 		DHD_GENERAL_LOCK(bus->dhd, flags);
4925 		bus->dhd->busstate = DHD_BUS_DATA;
4926 #ifdef DHD_PCIE_RUNTIMEPM
4927 		if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
4928 			bus->bus_wake = 1;
4929 			OSL_SMP_WMB();
4930 			wake_up_interruptible(&bus->rpm_queue);
4931 		}
4932 #endif /* DHD_PCIE_RUNTIMEPM */
4933 #ifdef PCIE_OOB
4934 		/*
4935 		 * Assert & Deassert the Device Wake. The following is the explanation for doing so.
4936 		 * 0) At this point,
4937 		 *    Host is in suspend state, Link is in L2/L3, Dongle is in D3 Cold
4938 		 *    Device Wake is enabled.
4939 		 * 1) When the Host comes out of Suspend, it first sends PERST# in the Link.
4940 		 *    Looking at this the Dongle moves from D3 Cold to NO DS State
4941 		 * 2) Now The Host OS calls the "resume" function of DHD. From here the DHD first
4942 		 *    Asserts the Device Wake.
4943 		 *    From the defn, when the Device Wake is asserted, The dongle FW will ensure
4944 		 *    that the Dongle is out of deep sleep IF the device is already in deep sleep.
4945 		 *    But note that now the Dongle is NOT in Deep sleep and is actually in
4946 		 *    NO DS state. So just driving the Device Wake high does not trigger any state
4947 		 *    transitions. The Host should actually "Toggle" the Device Wake to ensure
4948 		 *    that Dongle synchronizes with the Host and starts the State Transition to D0.
4949 		 * 4) Note that the above explanation is applicable Only when the Host comes out of
4950 		 *    suspend and the Dongle comes out of D3 Cold
4951 		 */
4952 		/* This logic is not required when hostready is enabled */
4953 
4954 		if (!bus->dhd->d2h_hostrdy_supported) {
4955 			if (OOB_DW_ENAB(bus)) {
4956 				dhd_bus_set_device_wake(bus, TRUE);
4957 				OSL_DELAY(1000);
4958 				dhd_bus_set_device_wake(bus, FALSE);
4959 			}
4960 		}
4961 #endif /* PCIE_OOB */
4962 		/* resume all interface network queue. */
4963 		dhd_bus_start_queue(bus);
4964 		/* The Host is ready to process interrupts now so enable the same. */
4965 
4966 		/* TODO: for NDIS also we need to use enable_irq in future */
4967 		bus->resume_intr_enable_count++;
4968 		dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
4969 		dhdpcie_enable_irq(bus); /* Enable back interrupt from Host side!! */
4970 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
4971 #ifdef DHD_TIMESYNC
4972 		DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
4973 		/* enable time sync mechanism, if configed */
4974 		dhd_timesync_control(bus->dhd, FALSE);
4975 		DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
4976 #endif /* DHD_TIMESYNC */
4977 	}
4978 	return rc;
4979 }
4980 
4981 uint32
dhdpcie_force_alp(struct dhd_bus * bus,bool enable)4982 dhdpcie_force_alp(struct dhd_bus *bus, bool enable)
4983 {
4984 	ASSERT(bus && bus->sih);
4985 	if (enable) {
4986 	si_corereg(bus->sih, bus->sih->buscoreidx,
4987 		OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, CCS_FORCEALP);
4988 	} else {
4989 		si_corereg(bus->sih, bus->sih->buscoreidx,
4990 			OFFSETOF(sbpcieregs_t, u.pcie2.clk_ctl_st), CCS_FORCEALP, 0);
4991 	}
4992 	return 0;
4993 }
4994 
4995 /* set pcie l1 entry time: dhd pciereg 0x1004[22:16] */
4996 uint32
dhdpcie_set_l1_entry_time(struct dhd_bus * bus,int l1_entry_time)4997 dhdpcie_set_l1_entry_time(struct dhd_bus *bus, int l1_entry_time)
4998 {
4999 	uint reg_val;
5000 
5001 	ASSERT(bus && bus->sih);
5002 
5003 	si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
5004 		0x1004);
5005 	reg_val = si_corereg(bus->sih, bus->sih->buscoreidx,
5006 		OFFSETOF(sbpcieregs_t, configdata), 0, 0);
5007 	reg_val = (reg_val & ~(0x7f << 16)) | ((l1_entry_time & 0x7f) << 16);
5008 	si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
5009 		reg_val);
5010 
5011 	return 0;
5012 }
5013 
5014 /** Transfers bytes from host to dongle and to host again using DMA */
5015 static int
dhdpcie_bus_dmaxfer_req(struct dhd_bus * bus,uint32 len,uint32 srcdelay,uint32 destdelay,uint32 d11_lpbk)5016 dhdpcie_bus_dmaxfer_req(
5017 	struct  dhd_bus *bus, uint32 len, uint32 srcdelay, uint32 destdelay, uint32 d11_lpbk)
5018 {
5019 	if (bus->dhd == NULL) {
5020 		DHD_ERROR(("%s: bus not inited\n", __FUNCTION__));
5021 		return BCME_ERROR;
5022 	}
5023 	if (bus->dhd->prot == NULL) {
5024 		DHD_ERROR(("%s: prot is not inited\n", __FUNCTION__));
5025 		return BCME_ERROR;
5026 	}
5027 	if (bus->dhd->busstate != DHD_BUS_DATA) {
5028 		DHD_ERROR(("%s: not in a readystate to LPBK  is not inited\n", __FUNCTION__));
5029 		return BCME_ERROR;
5030 	}
5031 
5032 	if (len < 5 || len > 4194296) {
5033 		DHD_ERROR(("%s: len is too small or too large\n", __FUNCTION__));
5034 		return BCME_ERROR;
5035 	}
5036 	return dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay, d11_lpbk);
5037 }
5038 
5039 
5040 
5041 static int
dhdpcie_bus_download_state(dhd_bus_t * bus,bool enter)5042 dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
5043 {
5044 	int bcmerror = 0;
5045 	volatile uint32 *cr4_regs;
5046 
5047 	if (!bus->sih) {
5048 		DHD_ERROR(("%s: NULL sih!!\n", __FUNCTION__));
5049 		return BCME_ERROR;
5050 	}
5051 	/* To enter download state, disable ARM and reset SOCRAM.
5052 	 * To exit download state, simply reset ARM (default is RAM boot).
5053 	 */
5054 	if (enter) {
5055 		/* Make sure BAR1 maps to backplane address 0 */
5056 		dhdpcie_bus_cfg_write_dword(bus, PCI_BAR1_WIN, 4, 0x00000000);
5057 		bus->alp_only = TRUE;
5058 
5059 		/* some chips (e.g. 43602) have two ARM cores, the CR4 is receives the firmware. */
5060 		cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
5061 
5062 		if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
5063 		    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) &&
5064 		    !(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
5065 			DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
5066 			bcmerror = BCME_ERROR;
5067 			goto fail;
5068 		}
5069 
5070 		if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
5071 			/* Halt ARM & remove reset */
5072 			si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
5073 			if (!(si_setcore(bus->sih, SYSMEM_CORE_ID, 0))) {
5074 				DHD_ERROR(("%s: Failed to find SYSMEM core!\n", __FUNCTION__));
5075 				bcmerror = BCME_ERROR;
5076 				goto fail;
5077 			}
5078 			si_core_reset(bus->sih, 0, 0);
5079 			/* reset last 4 bytes of RAM address. to be used for shared area */
5080 			dhdpcie_init_shared_addr(bus);
5081 		} else if (cr4_regs == NULL) { /* no CR4 present on chip */
5082 			si_core_disable(bus->sih, 0);
5083 
5084 			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
5085 				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
5086 				bcmerror = BCME_ERROR;
5087 				goto fail;
5088 			}
5089 
5090 			si_core_reset(bus->sih, 0, 0);
5091 
5092 			/* Clear the top bit of memory */
5093 			if (bus->ramsize) {
5094 				uint32 zeros = 0;
5095 				if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4,
5096 				                     (uint8*)&zeros, 4) < 0) {
5097 					bcmerror = BCME_ERROR;
5098 					goto fail;
5099 				}
5100 			}
5101 		} else {
5102 			/* For CR4,
5103 			 * Halt ARM
5104 			 * Remove ARM reset
5105 			 * Read RAM base address [0x18_0000]
5106 			 * [next] Download firmware
5107 			 * [done at else] Populate the reset vector
5108 			 * [done at else] Remove ARM halt
5109 			*/
5110 			/* Halt ARM & remove reset */
5111 			si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
5112 			if (BCM43602_CHIP(bus->sih->chip)) {
5113 				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5);
5114 				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
5115 				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7);
5116 				W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
5117 			}
5118 			/* reset last 4 bytes of RAM address. to be used for shared area */
5119 			dhdpcie_init_shared_addr(bus);
5120 		}
5121 	} else {
5122 		if (si_setcore(bus->sih, ARMCA7_CORE_ID, 0)) {
5123 			/* write vars */
5124 			if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
5125 				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
5126 				goto fail;
5127 			}
5128 			/* switch back to arm core again */
5129 			if (!(si_setcore(bus->sih, ARMCA7_CORE_ID, 0))) {
5130 				DHD_ERROR(("%s: Failed to find ARM CA7 core!\n", __FUNCTION__));
5131 				bcmerror = BCME_ERROR;
5132 				goto fail;
5133 			}
5134 			/* write address 0 with reset instruction */
5135 			bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
5136 				(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
5137 			/* now remove reset and halt and continue to run CA7 */
5138 		} else if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
5139 			if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
5140 				DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
5141 				bcmerror = BCME_ERROR;
5142 				goto fail;
5143 			}
5144 
5145 			if (!si_iscoreup(bus->sih)) {
5146 				DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
5147 				bcmerror = BCME_ERROR;
5148 				goto fail;
5149 			}
5150 
5151 			/* Enable remap before ARM reset but after vars.
5152 			 * No backplane access in remap mode
5153 			 */
5154 			if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
5155 			    !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
5156 				DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
5157 				bcmerror = BCME_ERROR;
5158 				goto fail;
5159 			}
5160 
5161 
5162 			if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
5163 			    !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
5164 				DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
5165 				bcmerror = BCME_ERROR;
5166 				goto fail;
5167 			}
5168 		} else {
5169 			if (BCM43602_CHIP(bus->sih->chip)) {
5170 				/* Firmware crashes on SOCSRAM access when core is in reset */
5171 				if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
5172 					DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
5173 						__FUNCTION__));
5174 					bcmerror = BCME_ERROR;
5175 					goto fail;
5176 				}
5177 				si_core_reset(bus->sih, 0, 0);
5178 				si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
5179 			}
5180 
5181 			/* write vars */
5182 			if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
5183 				DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
5184 				goto fail;
5185 			}
5186 
5187 #ifdef BCM_ASLR_HEAP
5188 			/* write a random number to TCM for the purpose of
5189 			 * randomizing heap address space.
5190 			 */
5191 			dhdpcie_wrt_rnd(bus);
5192 #endif /* BCM_ASLR_HEAP */
5193 
5194 			/* switch back to arm core again */
5195 			if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
5196 				DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
5197 				bcmerror = BCME_ERROR;
5198 				goto fail;
5199 			}
5200 
5201 			/* write address 0 with reset instruction */
5202 			bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
5203 				(uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
5204 
5205 			if (bcmerror == BCME_OK) {
5206 				uint32 tmp;
5207 
5208 				bcmerror = dhdpcie_bus_membytes(bus, FALSE, 0,
5209 				                                (uint8 *)&tmp, sizeof(tmp));
5210 
5211 				if (bcmerror == BCME_OK && tmp != bus->resetinstr) {
5212 					DHD_ERROR(("%s: Failed to write 0x%08x to addr 0\n",
5213 					          __FUNCTION__, bus->resetinstr));
5214 					DHD_ERROR(("%s: contents of addr 0 is 0x%08x\n",
5215 					          __FUNCTION__, tmp));
5216 					bcmerror = BCME_ERROR;
5217 					goto fail;
5218 				}
5219 			}
5220 
5221 			/* now remove reset and halt and continue to run CR4 */
5222 		}
5223 
5224 		si_core_reset(bus->sih, 0, 0);
5225 
5226 		/* Allow HT Clock now that the ARM is running. */
5227 		bus->alp_only = FALSE;
5228 
5229 		bus->dhd->busstate = DHD_BUS_LOAD;
5230 	}
5231 
5232 fail:
5233 	/* Always return to PCIE core */
5234 	si_setcore(bus->sih, PCIE2_CORE_ID, 0);
5235 
5236 	return bcmerror;
5237 } /* dhdpcie_bus_download_state */
5238 
5239 static int
dhdpcie_bus_write_vars(dhd_bus_t * bus)5240 dhdpcie_bus_write_vars(dhd_bus_t *bus)
5241 {
5242 	int bcmerror = 0;
5243 	uint32 varsize, phys_size;
5244 	uint32 varaddr;
5245 	uint8 *vbuffer;
5246 	uint32 varsizew;
5247 #ifdef DHD_DEBUG
5248 	uint8 *nvram_ularray;
5249 #endif /* DHD_DEBUG */
5250 
5251 	/* Even if there are no vars are to be written, we still need to set the ramsize. */
5252 	varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
5253 	varaddr = (bus->ramsize - 4) - varsize;
5254 
5255 	varaddr += bus->dongle_ram_base;
5256 
5257 	if (bus->vars) {
5258 
5259 		vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
5260 		if (!vbuffer)
5261 			return BCME_NOMEM;
5262 
5263 		bzero(vbuffer, varsize);
5264 		bcopy(bus->vars, vbuffer, bus->varsz);
5265 		/* Write the vars list */
5266 		bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);
5267 
5268 		/* Implement read back and verify later */
5269 #ifdef DHD_DEBUG
5270 		/* Verify NVRAM bytes */
5271 		DHD_INFO(("%s: Compare NVRAM dl & ul; varsize=%d\n", __FUNCTION__, varsize));
5272 		nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
5273 		if (!nvram_ularray)
5274 			return BCME_NOMEM;
5275 
5276 		/* Upload image to verify downloaded contents. */
5277 		memset(nvram_ularray, 0xaa, varsize);
5278 
5279 		/* Read the vars list to temp buffer for comparison */
5280 		bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
5281 		if (bcmerror) {
5282 				DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
5283 					__FUNCTION__, bcmerror, varsize, varaddr));
5284 		}
5285 
5286 		/* Compare the org NVRAM with the one read from RAM */
5287 		if (memcmp(vbuffer, nvram_ularray, varsize)) {
5288 			DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
5289 		} else
5290 			DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
5291 			__FUNCTION__));
5292 
5293 		MFREE(bus->dhd->osh, nvram_ularray, varsize);
5294 #endif /* DHD_DEBUG */
5295 
5296 		MFREE(bus->dhd->osh, vbuffer, varsize);
5297 	}
5298 
5299 	phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
5300 
5301 	phys_size += bus->dongle_ram_base;
5302 
5303 	/* adjust to the user specified RAM */
5304 	DHD_INFO(("%s: Physical memory size: %d, usable memory size: %d\n", __FUNCTION__,
5305 		phys_size, bus->ramsize));
5306 	DHD_INFO(("%s: Vars are at %d, orig varsize is %d\n", __FUNCTION__,
5307 		varaddr, varsize));
5308 	varsize = ((phys_size - 4) - varaddr);
5309 
5310 	/*
5311 	 * Determine the length token:
5312 	 * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
5313 	 */
5314 	if (bcmerror) {
5315 		varsizew = 0;
5316 		bus->nvram_csm = varsizew;
5317 	} else {
5318 		varsizew = varsize / 4;
5319 		varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
5320 		bus->nvram_csm = varsizew;
5321 		varsizew = htol32(varsizew);
5322 	}
5323 
5324 	DHD_INFO(("%s: New varsize is %d, length token=0x%08x\n", __FUNCTION__, varsize, varsizew));
5325 
5326 	/* Write the length token to the last word */
5327 	bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
5328 		(uint8*)&varsizew, 4);
5329 
5330 	return bcmerror;
5331 } /* dhdpcie_bus_write_vars */
5332 
5333 int
dhdpcie_downloadvars(dhd_bus_t * bus,void * arg,int len)5334 dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
5335 {
5336 	int bcmerror = BCME_OK;
5337 
5338 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5339 
5340 	/* Basic sanity checks */
5341 	if (bus->dhd->up) {
5342 		bcmerror = BCME_NOTDOWN;
5343 		goto err;
5344 	}
5345 	if (!len) {
5346 		bcmerror = BCME_BUFTOOSHORT;
5347 		goto err;
5348 	}
5349 
5350 	/* Free the old ones and replace with passed variables */
5351 	if (bus->vars)
5352 		MFREE(bus->dhd->osh, bus->vars, bus->varsz);
5353 
5354 	bus->vars = MALLOC(bus->dhd->osh, len);
5355 	bus->varsz = bus->vars ? len : 0;
5356 	if (bus->vars == NULL) {
5357 		bcmerror = BCME_NOMEM;
5358 		goto err;
5359 	}
5360 
5361 	/* Copy the passed variables, which should include the terminating double-null */
5362 	bcopy(arg, bus->vars, bus->varsz);
5363 
5364 #ifdef DHD_USE_SINGLE_NVRAM_FILE
5365 	if (dhd_bus_get_fw_mode(bus->dhd) == DHD_FLAG_MFG_MODE) {
5366 		char *sp = NULL;
5367 		char *ep = NULL;
5368 		int i;
5369 		char tag[2][8] = {"ccode=", "regrev="};
5370 
5371 		/* Find ccode and regrev info */
5372 		for (i = 0; i < 2; i++) {
5373 			sp = strnstr(bus->vars, tag[i], bus->varsz);
5374 			if (!sp) {
5375 				DHD_ERROR(("%s: Could not find ccode info from the nvram %s\n",
5376 					__FUNCTION__, bus->nv_path));
5377 				bcmerror = BCME_ERROR;
5378 				goto err;
5379 			}
5380 			sp = strchr(sp, '=');
5381 			ep = strchr(sp, '\0');
5382 			/* We assumed that string length of both ccode and
5383 			 * regrev values should not exceed WLC_CNTRY_BUF_SZ
5384 			 */
5385 			if (sp && ep && ((ep - sp) <= WLC_CNTRY_BUF_SZ)) {
5386 				sp++;
5387 				while (*sp != '\0') {
5388 					DHD_INFO(("%s: parse '%s', current sp = '%c'\n",
5389 						__FUNCTION__, tag[i], *sp));
5390 					*sp++ = '0';
5391 				}
5392 			} else {
5393 				DHD_ERROR(("%s: Invalid parameter format when parsing for %s\n",
5394 					__FUNCTION__, tag[i]));
5395 				bcmerror = BCME_ERROR;
5396 				goto err;
5397 			}
5398 		}
5399 	}
5400 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
5401 
5402 
5403 err:
5404 	return bcmerror;
5405 }
5406 
5407 /* loop through the capability list and see if the pcie capabilty exists */
5408 uint8
dhdpcie_find_pci_capability(osl_t * osh,uint8 req_cap_id)5409 dhdpcie_find_pci_capability(osl_t *osh, uint8 req_cap_id)
5410 {
5411 	uint8 cap_id;
5412 	uint8 cap_ptr = 0;
5413 	uint8 byte_val;
5414 
5415 	/* check for Header type 0 */
5416 	byte_val = read_pci_cfg_byte(PCI_CFG_HDR);
5417 	if ((byte_val & 0x7f) != PCI_HEADER_NORMAL) {
5418 		DHD_ERROR(("%s : PCI config header not normal.\n", __FUNCTION__));
5419 		goto end;
5420 	}
5421 
5422 	/* check if the capability pointer field exists */
5423 	byte_val = read_pci_cfg_byte(PCI_CFG_STAT);
5424 	if (!(byte_val & PCI_CAPPTR_PRESENT)) {
5425 		DHD_ERROR(("%s : PCI CAP pointer not present.\n", __FUNCTION__));
5426 		goto end;
5427 	}
5428 
5429 	cap_ptr = read_pci_cfg_byte(PCI_CFG_CAPPTR);
5430 	/* check if the capability pointer is 0x00 */
5431 	if (cap_ptr == 0x00) {
5432 		DHD_ERROR(("%s : PCI CAP pointer is 0x00.\n", __FUNCTION__));
5433 		goto end;
5434 	}
5435 
5436 	/* loop thr'u the capability list and see if the pcie capabilty exists */
5437 
5438 	cap_id = read_pci_cfg_byte(cap_ptr);
5439 
5440 	while (cap_id != req_cap_id) {
5441 		cap_ptr = read_pci_cfg_byte((cap_ptr + 1));
5442 		if (cap_ptr == 0x00) break;
5443 		cap_id = read_pci_cfg_byte(cap_ptr);
5444 	}
5445 
5446 end:
5447 	return cap_ptr;
5448 }
5449 
5450 void
dhdpcie_pme_active(osl_t * osh,bool enable)5451 dhdpcie_pme_active(osl_t *osh, bool enable)
5452 {
5453 	uint8 cap_ptr;
5454 	uint32 pme_csr;
5455 
5456 	cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
5457 
5458 	if (!cap_ptr) {
5459 		DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
5460 		return;
5461 	}
5462 
5463 	pme_csr = OSL_PCI_READ_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32));
5464 	DHD_ERROR(("%s : pme_sts_ctrl 0x%x\n", __FUNCTION__, pme_csr));
5465 
5466 	pme_csr |= PME_CSR_PME_STAT;
5467 	if (enable) {
5468 		pme_csr |= PME_CSR_PME_EN;
5469 	} else {
5470 		pme_csr &= ~PME_CSR_PME_EN;
5471 	}
5472 
5473 	OSL_PCI_WRITE_CONFIG(osh, cap_ptr + PME_CSR_OFFSET, sizeof(uint32), pme_csr);
5474 }
5475 
5476 bool
dhdpcie_pme_cap(osl_t * osh)5477 dhdpcie_pme_cap(osl_t *osh)
5478 {
5479 	uint8 cap_ptr;
5480 	uint32 pme_cap;
5481 
5482 	cap_ptr = dhdpcie_find_pci_capability(osh, PCI_CAP_POWERMGMTCAP_ID);
5483 
5484 	if (!cap_ptr) {
5485 		DHD_ERROR(("%s : Power Management Capability not present\n", __FUNCTION__));
5486 		return FALSE;
5487 	}
5488 
5489 	pme_cap = OSL_PCI_READ_CONFIG(osh, cap_ptr, sizeof(uint32));
5490 
5491 	DHD_ERROR(("%s : pme_cap 0x%x\n", __FUNCTION__, pme_cap));
5492 
5493 	return ((pme_cap & PME_CAP_PM_STATES) != 0);
5494 }
5495 
5496 uint32
dhdpcie_lcreg(osl_t * osh,uint32 mask,uint32 val)5497 dhdpcie_lcreg(osl_t *osh, uint32 mask, uint32 val)
5498 {
5499 
5500 	uint8	pcie_cap;
5501 	uint8	lcreg_offset;	/* PCIE capability LCreg offset in the config space */
5502 	uint32	reg_val;
5503 
5504 
5505 	pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
5506 
5507 	if (!pcie_cap) {
5508 		DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
5509 		return 0;
5510 	}
5511 
5512 	lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
5513 
5514 	/* set operation */
5515 	if (mask) {
5516 		/* read */
5517 		reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
5518 
5519 		/* modify */
5520 		reg_val &= ~mask;
5521 		reg_val |= (mask & val);
5522 
5523 		/* write */
5524 		OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
5525 	}
5526 	return OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
5527 }
5528 
5529 
5530 
5531 uint8
dhdpcie_clkreq(osl_t * osh,uint32 mask,uint32 val)5532 dhdpcie_clkreq(osl_t *osh, uint32 mask, uint32 val)
5533 {
5534 	uint8	pcie_cap;
5535 	uint32	reg_val;
5536 	uint8	lcreg_offset;	/* PCIE capability LCreg offset in the config space */
5537 
5538 	pcie_cap = dhdpcie_find_pci_capability(osh, PCI_CAP_PCIECAP_ID);
5539 
5540 	if (!pcie_cap) {
5541 		DHD_ERROR(("%s : PCIe Capability not present\n", __FUNCTION__));
5542 		return 0;
5543 	}
5544 
5545 	lcreg_offset = pcie_cap + PCIE_CAP_LINKCTRL_OFFSET;
5546 
5547 	reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
5548 	/* set operation */
5549 	if (mask) {
5550 		if (val)
5551 			reg_val |= PCIE_CLKREQ_ENAB;
5552 		else
5553 			reg_val &= ~PCIE_CLKREQ_ENAB;
5554 		OSL_PCI_WRITE_CONFIG(osh, lcreg_offset, sizeof(uint32), reg_val);
5555 		reg_val = OSL_PCI_READ_CONFIG(osh, lcreg_offset, sizeof(uint32));
5556 	}
5557 	if (reg_val & PCIE_CLKREQ_ENAB)
5558 		return 1;
5559 	else
5560 		return 0;
5561 }
5562 
dhd_dump_intr_registers(dhd_pub_t * dhd,struct bcmstrbuf * strbuf)5563 void dhd_dump_intr_registers(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
5564 {
5565 	uint32 intstatus = 0;
5566 	uint32 intmask = 0;
5567 	uint32 mbintstatus = 0;
5568 	uint32 d2h_mb_data = 0;
5569 
5570 	intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
5571 	intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
5572 	mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, PCID2H_MailBox, 0, 0);
5573 	dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
5574 
5575 	bcm_bprintf(strbuf, "intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n",
5576 		intstatus, intmask, mbintstatus);
5577 	bcm_bprintf(strbuf, "d2h_mb_data=0x%x def_intmask=0x%x\n",
5578 		d2h_mb_data, dhd->bus->def_intmask);
5579 	bcm_bprintf(strbuf, "\n ------- DUMPING INTR enable/disable counters-------\n");
5580 	bcm_bprintf(strbuf, "resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
5581 		"isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
5582 		"dpc_return_busdown_count=%lu\n",
5583 		dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count,
5584 		dhd->bus->isr_intr_disable_count, dhd->bus->suspend_intr_disable_count,
5585 		dhd->bus->dpc_return_busdown_count);
5586 }
5587 
5588 /** Add bus dump output to a buffer */
dhd_bus_dump(dhd_pub_t * dhdp,struct bcmstrbuf * strbuf)5589 void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
5590 {
5591 	uint16 flowid;
5592 	int ix = 0;
5593 	flow_ring_node_t *flow_ring_node;
5594 	flow_info_t *flow_info;
5595 	char eabuf[ETHER_ADDR_STR_LEN];
5596 
5597 	if (dhdp->busstate != DHD_BUS_DATA)
5598 		return;
5599 
5600 #ifdef DHD_WAKE_STATUS
5601 	bcm_bprintf(strbuf, "wake %u rxwake %u readctrlwake %u\n",
5602 		bcmpcie_get_total_wake(dhdp->bus), dhdp->bus->wake_counts.rxwake,
5603 		dhdp->bus->wake_counts.rcwake);
5604 #ifdef DHD_WAKE_RX_STATUS
5605 	bcm_bprintf(strbuf, " unicast %u muticast %u broadcast %u arp %u\n",
5606 		dhdp->bus->wake_counts.rx_ucast, dhdp->bus->wake_counts.rx_mcast,
5607 		dhdp->bus->wake_counts.rx_bcast, dhdp->bus->wake_counts.rx_arp);
5608 	bcm_bprintf(strbuf, " multi4 %u multi6 %u icmp6 %u multiother %u\n",
5609 		dhdp->bus->wake_counts.rx_multi_ipv4, dhdp->bus->wake_counts.rx_multi_ipv6,
5610 		dhdp->bus->wake_counts.rx_icmpv6, dhdp->bus->wake_counts.rx_multi_other);
5611 	bcm_bprintf(strbuf, " icmp6_ra %u, icmp6_na %u, icmp6_ns %u\n",
5612 		dhdp->bus->wake_counts.rx_icmpv6_ra, dhdp->bus->wake_counts.rx_icmpv6_na,
5613 		dhdp->bus->wake_counts.rx_icmpv6_ns);
5614 #endif /* DHD_WAKE_RX_STATUS */
5615 #ifdef DHD_WAKE_EVENT_STATUS
5616 	for (flowid = 0; flowid < WLC_E_LAST; flowid++)
5617 		if (dhdp->bus->wake_counts.rc_event[flowid] != 0)
5618 			bcm_bprintf(strbuf, " %s = %u\n", bcmevent_get_name(flowid),
5619 				dhdp->bus->wake_counts.rc_event[flowid]);
5620 	bcm_bprintf(strbuf, "\n");
5621 #endif /* DHD_WAKE_EVENT_STATUS */
5622 #endif /* DHD_WAKE_STATUS */
5623 
5624 	dhd_prot_print_info(dhdp, strbuf);
5625 	dhd_dump_intr_registers(dhdp, strbuf);
5626 	bcm_bprintf(strbuf, "h2d_mb_data_ptr_addr 0x%x, d2h_mb_data_ptr_addr 0x%x\n",
5627 		dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr);
5628 	bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr));
5629 	bcm_bprintf(strbuf,
5630 		"%s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ",
5631 		"Num:", "Flow", "If", "Prio", ":Dest_MacAddress:", "Qlen", "CLen", "L2CLen",
5632 		"Overflows", "RD", "WR");
5633 	bcm_bprintf(strbuf, "%5s %6s %5s \n", "Acked", "tossed", "noack");
5634 
5635 	for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
5636 		flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
5637 		if (!flow_ring_node->active)
5638 			continue;
5639 
5640 		flow_info = &flow_ring_node->flow_info;
5641 		bcm_bprintf(strbuf,
5642 			"%3d. %4d %2d %4d %17s %4d %4d %6d %10u ", ix++,
5643 			flow_ring_node->flowid, flow_info->ifindex, flow_info->tid,
5644 			bcm_ether_ntoa((struct ether_addr *)&flow_info->da, eabuf),
5645 			DHD_FLOW_QUEUE_LEN(&flow_ring_node->queue),
5646 			DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_CLEN_PTR(&flow_ring_node->queue)),
5647 			DHD_CUMM_CTR_READ(DHD_FLOW_QUEUE_L2CLEN_PTR(&flow_ring_node->queue)),
5648 			DHD_FLOW_QUEUE_FAILURES(&flow_ring_node->queue));
5649 		dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf,
5650 			"%4d %4d ");
5651 		bcm_bprintf(strbuf,
5652 			"%5s %6s %5s\n", "NA", "NA", "NA");
5653 	}
5654 	bcm_bprintf(strbuf, "D3 inform cnt %d\n", dhdp->bus->d3_inform_cnt);
5655 	bcm_bprintf(strbuf, "D0 inform cnt %d\n", dhdp->bus->d0_inform_cnt);
5656 	bcm_bprintf(strbuf, "D0 inform in use cnt %d\n", dhdp->bus->d0_inform_in_use_cnt);
5657 	if (dhdp->d2h_hostrdy_supported) {
5658 		bcm_bprintf(strbuf, "hostready count:%d\n", dhdp->bus->hostready_count);
5659 	}
5660 #ifdef PCIE_INB_DW
5661 	/* Inband device wake counters */
5662 	if (INBAND_DW_ENAB(dhdp->bus)) {
5663 		bcm_bprintf(strbuf, "Inband device_wake assert count: %d\n",
5664 			dhdp->bus->inband_dw_assert_cnt);
5665 		bcm_bprintf(strbuf, "Inband device_wake deassert count: %d\n",
5666 			dhdp->bus->inband_dw_deassert_cnt);
5667 		bcm_bprintf(strbuf, "Inband DS-EXIT <host initiated> count: %d\n",
5668 			dhdp->bus->inband_ds_exit_host_cnt);
5669 		bcm_bprintf(strbuf, "Inband DS-EXIT <device initiated> count: %d\n",
5670 			dhdp->bus->inband_ds_exit_device_cnt);
5671 		bcm_bprintf(strbuf, "Inband DS-EXIT Timeout count: %d\n",
5672 			dhdp->bus->inband_ds_exit_to_cnt);
5673 		bcm_bprintf(strbuf, "Inband HOST_SLEEP-EXIT Timeout count: %d\n",
5674 			dhdp->bus->inband_host_sleep_exit_to_cnt);
5675 	}
5676 #endif /* PCIE_INB_DW */
5677 }
5678 
5679 /**
5680  * Brings transmit packets on all flow rings closer to the dongle, by moving (a subset) from their
5681  * flow queue to their flow ring.
5682  */
5683 static void
dhd_update_txflowrings(dhd_pub_t * dhd)5684 dhd_update_txflowrings(dhd_pub_t *dhd)
5685 {
5686 	unsigned long flags;
5687 	dll_t *item, *next;
5688 	flow_ring_node_t *flow_ring_node;
5689 	struct dhd_bus *bus = dhd->bus;
5690 
5691 	/* Hold flowring_list_lock to ensure no race condition while accessing the List */
5692 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
5693 	for (item = dll_head_p(&bus->flowring_active_list);
5694 		(!dhd_is_device_removed(dhd) && !dll_end(&bus->flowring_active_list, item));
5695 		item = next) {
5696 		if (dhd->hang_was_sent) {
5697 			break;
5698 		}
5699 
5700 		next = dll_next_p(item);
5701 		flow_ring_node = dhd_constlist_to_flowring(item);
5702 
5703 		/* Ensure that flow_ring_node in the list is Not Null */
5704 		ASSERT(flow_ring_node != NULL);
5705 
5706 		/* Ensure that the flowring node has valid contents */
5707 		ASSERT(flow_ring_node->prot_info != NULL);
5708 
5709 		dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
5710 	}
5711 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
5712 }
5713 
5714 /** Mailbox ringbell Function */
5715 static void
dhd_bus_gen_devmb_intr(struct dhd_bus * bus)5716 dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
5717 {
5718 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
5719 		(bus->sih->buscorerev == 4)) {
5720 		DHD_ERROR(("%s: mailbox communication not supported\n", __FUNCTION__));
5721 		return;
5722 	}
5723 	if (bus->db1_for_mb)  {
5724 		/* this is a pcie core register, not the config register */
5725 		DHD_INFO(("%s: writing a mail box interrupt to the device, through doorbell 1\n", __FUNCTION__));
5726 		si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678);
5727 	} else {
5728 		DHD_INFO(("%s: writing a mail box interrupt to the device, through config space\n", __FUNCTION__));
5729 		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
5730 		dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
5731 	}
5732 }
5733 
5734 /* Upon receiving a mailbox interrupt,
5735  * if H2D_FW_TRAP bit is set in mailbox location
5736  * device traps
5737  */
5738 static void
dhdpcie_fw_trap(dhd_bus_t * bus)5739 dhdpcie_fw_trap(dhd_bus_t *bus)
5740 {
5741 	/* Send the mailbox data and generate mailbox intr. */
5742 	dhdpcie_send_mb_data(bus, H2D_FW_TRAP);
5743 }
5744 
5745 #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
5746 void
dhd_bus_doorbell_timeout_reset(struct dhd_bus * bus)5747 dhd_bus_doorbell_timeout_reset(struct dhd_bus *bus)
5748 {
5749 	if (dhd_doorbell_timeout)
5750 		dhd_timeout_start(&bus->doorbell_timer,
5751 			(dhd_doorbell_timeout * 1000) / dhd_watchdog_ms);
5752 	else if (!(bus->dhd->busstate == DHD_BUS_SUSPEND)) {
5753 		dhd_bus_set_device_wake(bus, FALSE);
5754 	}
5755 }
5756 #endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
5757 
5758 #ifdef PCIE_INB_DW
5759 
5760 void
dhd_bus_inb_ack_pending_ds_req(dhd_bus_t * bus)5761 dhd_bus_inb_ack_pending_ds_req(dhd_bus_t *bus)
5762 {
5763 	/* The DHD_BUS_INB_DW_LOCK must be held before
5764 	* calling this function !!
5765 	*/
5766 	if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
5767 		DW_DEVICE_DS_DEV_SLEEP_PEND) &&
5768 		(bus->host_active_cnt == 0)) {
5769 		dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_DEV_SLEEP);
5770 		dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
5771 	}
5772 }
5773 
5774 int
dhd_bus_inb_set_device_wake(struct dhd_bus * bus,bool val)5775 dhd_bus_inb_set_device_wake(struct dhd_bus *bus, bool val)
5776 {
5777 	int timeleft;
5778 	unsigned long flags;
5779 	int ret;
5780 
5781 	if (!INBAND_DW_ENAB(bus)) {
5782 		return BCME_ERROR;
5783 	}
5784 
5785 	if (val) {
5786 		DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
5787 
5788 		/*
5789 		 * Reset the Door Bell Timeout value. So that the Watchdog
5790 		 * doesn't try to Deassert Device Wake, while we are in
5791 		 * the process of still Asserting the same.
5792 		 */
5793 		if (dhd_doorbell_timeout) {
5794 			dhd_timeout_start(&bus->doorbell_timer,
5795 				(dhd_doorbell_timeout * 1000) / dhd_watchdog_ms);
5796 		}
5797 
5798 		if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
5799 			DW_DEVICE_DS_DEV_SLEEP) {
5800 			/* Clear wait_for_ds_exit */
5801 			bus->wait_for_ds_exit = 0;
5802 			ret = dhdpcie_send_mb_data(bus, H2DMB_DS_DEVICE_WAKE_ASSERT);
5803 			if (ret != BCME_OK) {
5804 				DHD_ERROR(("Failed: assert Inband device_wake\n"));
5805 				bus->wait_for_ds_exit = 1;
5806 				DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
5807 				ret = BCME_ERROR;
5808 				goto exit;
5809 			}
5810 			dhdpcie_bus_set_pcie_inband_dw_state(bus,
5811 				DW_DEVICE_DS_DISABLED_WAIT);
5812 			bus->inband_dw_assert_cnt++;
5813 		} else {
5814 			DHD_INFO(("Not in DS SLEEP state \n"));
5815 			DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
5816 			ret = BCME_OK;
5817 			goto exit;
5818 		}
5819 
5820 		/*
5821 		 * Since we are going to wait/sleep .. release the lock.
5822 		 * The Device Wake sanity is still valid, because
5823 		 * a) If there is another context that comes in and tries
5824 		 *    to assert DS again and if it gets the lock, since
5825 		 *    ds_state would be now != DW_DEVICE_DS_DEV_SLEEP the
5826 		 *    context would return saying Not in DS Sleep.
5827 		 * b) If ther is another context that comes in and tries
5828 		 *    to de-assert DS and gets the lock,
5829 		 *    since the ds_state is != DW_DEVICE_DS_DEV_WAKE
5830 		 *    that context would return too. This can not happen
5831 		 *    since the watchdog is the only context that can
5832 		 *    De-Assert Device Wake and as the first step of
5833 		 *    Asserting the Device Wake, we have pushed out the
5834 		 *    Door Bell Timeout.
5835 		 *
5836 		 */
5837 		DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
5838 
5839 		if (!CAN_SLEEP()) {
5840 			/* Called from context that cannot sleep */
5841 			OSL_DELAY(1000);
5842 			bus->wait_for_ds_exit = 1;
5843 		} else {
5844 			/* Wait for DS EXIT for DS_EXIT_TIMEOUT seconds */
5845 			timeleft = dhd_os_ds_exit_wait(bus->dhd, &bus->wait_for_ds_exit);
5846 			if (!bus->wait_for_ds_exit && timeleft == 0) {
5847 				DHD_ERROR(("DS-EXIT timeout\n"));
5848 				bus->inband_ds_exit_to_cnt++;
5849 				bus->ds_exit_timeout = 0;
5850 				ret = BCME_ERROR;
5851 				goto exit;
5852 			}
5853 		}
5854 
5855 		DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
5856 		dhdpcie_bus_set_pcie_inband_dw_state(bus,
5857 			DW_DEVICE_DS_DEV_WAKE);
5858 		DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
5859 
5860 		ret = BCME_OK;
5861 	} else {
5862 		DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
5863 		if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
5864 			DW_DEVICE_DS_DEV_WAKE)) {
5865 			ret = dhdpcie_send_mb_data(bus, H2DMB_DS_DEVICE_WAKE_DEASSERT);
5866 			if (ret != BCME_OK) {
5867 				DHD_ERROR(("Failed: deassert Inband device_wake\n"));
5868 				DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
5869 				goto exit;
5870 			}
5871 			dhdpcie_bus_set_pcie_inband_dw_state(bus,
5872 				DW_DEVICE_DS_ACTIVE);
5873 			bus->inband_dw_deassert_cnt++;
5874 		} else if ((dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
5875 			DW_DEVICE_DS_DEV_SLEEP_PEND) &&
5876 			(bus->host_active_cnt == 0)) {
5877 			dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_DEV_SLEEP);
5878 			dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
5879 		}
5880 
5881 		ret = BCME_OK;
5882 		DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
5883 	}
5884 
5885 exit:
5886 	return ret;
5887 }
5888 #endif /* PCIE_INB_DW */
5889 
5890 
5891 #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
5892 int
dhd_bus_set_device_wake(struct dhd_bus * bus,bool val)5893 dhd_bus_set_device_wake(struct dhd_bus *bus, bool val)
5894 {
5895 	if (bus->ds_enabled) {
5896 #ifdef PCIE_INB_DW
5897 		if (INBAND_DW_ENAB(bus)) {
5898 			return dhd_bus_inb_set_device_wake(bus, val);
5899 		}
5900 #endif /* PCIE_INB_DW */
5901 #ifdef PCIE_OOB
5902 		if (OOB_DW_ENAB(bus)) {
5903 			return dhd_os_oob_set_device_wake(bus, val);
5904 		}
5905 #endif /* PCIE_OOB */
5906 	}
5907 	return BCME_OK;
5908 }
5909 #endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
5910 
5911 /** mailbox doorbell ring function */
5912 void
dhd_bus_ringbell(struct dhd_bus * bus,uint32 value)5913 dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
5914 {
5915 	/* Skip after sending D3_INFORM */
5916 	if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) {
5917 		DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :"
5918 			"busstate=%d, d3_suspend_pending=%d\n",
5919 			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
5920 		return;
5921 	}
5922 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
5923 		(bus->sih->buscorerev == 4)) {
5924 		si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, PCIE_INTB, PCIE_INTB);
5925 	} else {
5926 		/* this is a pcie core register, not the config regsiter */
5927 		DHD_INFO(("%s: writing a door bell to the device\n", __FUNCTION__));
5928 		if (IDMA_ACTIVE(bus->dhd)) {
5929 			si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox_2,
5930 				~0, value);
5931 		} else {
5932 			si_corereg(bus->sih, bus->sih->buscoreidx,
5933 				PCIH2D_MailBox, ~0, 0x12345678);
5934 		}
5935 	}
5936 }
5937 
5938 /** mailbox doorbell ring function for IDMA/IFRM using dma channel2 */
5939 void
dhd_bus_ringbell_2(struct dhd_bus * bus,uint32 value,bool devwake)5940 dhd_bus_ringbell_2(struct dhd_bus *bus, uint32 value, bool devwake)
5941 {
5942 	/* this is a pcie core register, not the config regsiter */
5943 	/* Skip after sending D3_INFORM */
5944 	if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) {
5945 		DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :"
5946 			"busstate=%d, d3_suspend_pending=%d\n",
5947 			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
5948 		return;
5949 	}
5950 	DHD_INFO(("writing a door bell 2 to the device\n"));
5951 	si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox_2,
5952 		~0, value);
5953 }
5954 
5955 void
dhdpcie_bus_ringbell_fast(struct dhd_bus * bus,uint32 value)5956 dhdpcie_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
5957 {
5958 	/* Skip after sending D3_INFORM */
5959 	if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) {
5960 		DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :"
5961 			"busstate=%d, d3_suspend_pending=%d\n",
5962 			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
5963 		return;
5964 	}
5965 #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
5966 	if (OOB_DW_ENAB(bus)) {
5967 		dhd_bus_set_device_wake(bus, TRUE);
5968 	}
5969 	dhd_bus_doorbell_timeout_reset(bus);
5970 #endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
5971 	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
5972 }
5973 
5974 void
dhdpcie_bus_ringbell_2_fast(struct dhd_bus * bus,uint32 value,bool devwake)5975 dhdpcie_bus_ringbell_2_fast(struct dhd_bus *bus, uint32 value, bool devwake)
5976 {
5977 	/* Skip after sending D3_INFORM */
5978 	if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) {
5979 		DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :"
5980 			"busstate=%d, d3_suspend_pending=%d\n",
5981 			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
5982 		return;
5983 	}
5984 #if defined(PCIE_OOB) || defined(PCIE_INB_DW)
5985 	if (devwake) {
5986 		if (OOB_DW_ENAB(bus)) {
5987 			dhd_bus_set_device_wake(bus, TRUE);
5988 		}
5989 	}
5990 	dhd_bus_doorbell_timeout_reset(bus);
5991 #endif /* defined(PCIE_OOB) || defined(PCIE_INB_DW) */
5992 
5993 	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_2_addr, value);
5994 }
5995 
5996 static void
dhd_bus_ringbell_oldpcie(struct dhd_bus * bus,uint32 value)5997 dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
5998 {
5999 	uint32 w;
6000 	/* Skip after sending D3_INFORM */
6001 	if (bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) {
6002 		DHD_ERROR(("%s: trying to ring the doorbell when in suspend state :"
6003 			"busstate=%d, d3_suspend_pending=%d\n",
6004 			__FUNCTION__, bus->dhd->busstate, bus->d3_suspend_pending));
6005 		return;
6006 	}
6007 	w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
6008 	W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
6009 }
6010 
6011 dhd_mb_ring_t
dhd_bus_get_mbintr_fn(struct dhd_bus * bus)6012 dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
6013 {
6014 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
6015 		(bus->sih->buscorerev == 4)) {
6016 		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
6017 			PCIMailBoxInt);
6018 		if (bus->pcie_mb_intr_addr) {
6019 			bus->pcie_mb_intr_osh = si_osh(bus->sih);
6020 			return dhd_bus_ringbell_oldpcie;
6021 		}
6022 	} else {
6023 		bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
6024 			PCIH2D_MailBox);
6025 		if (bus->pcie_mb_intr_addr) {
6026 			bus->pcie_mb_intr_osh = si_osh(bus->sih);
6027 			return dhdpcie_bus_ringbell_fast;
6028 		}
6029 	}
6030 	return dhd_bus_ringbell;
6031 }
6032 
6033 dhd_mb_ring_2_t
dhd_bus_get_mbintr_2_fn(struct dhd_bus * bus)6034 dhd_bus_get_mbintr_2_fn(struct dhd_bus *bus)
6035 {
6036 	bus->pcie_mb_intr_2_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
6037 		PCIH2D_MailBox_2);
6038 	if (bus->pcie_mb_intr_2_addr) {
6039 		bus->pcie_mb_intr_osh = si_osh(bus->sih);
6040 		return dhdpcie_bus_ringbell_2_fast;
6041 	}
6042 	return dhd_bus_ringbell_2;
6043 }
6044 
6045 bool BCMFASTPATH
dhd_bus_dpc(struct dhd_bus * bus)6046 dhd_bus_dpc(struct dhd_bus *bus)
6047 {
6048 	bool resched = FALSE;	  /* Flag indicating resched wanted */
6049 	unsigned long flags;
6050 
6051 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
6052 
6053 	DHD_GENERAL_LOCK(bus->dhd, flags);
6054 	/* Check for only DHD_BUS_DOWN and not for DHD_BUS_DOWN_IN_PROGRESS
6055 	 * to avoid IOCTL Resumed On timeout when ioctl is waiting for response
6056 	 * and rmmod is fired in parallel, which will make DHD_BUS_DOWN_IN_PROGRESS
6057 	 * and if we return from here, then IOCTL response will never be handled
6058 	 */
6059 	if (bus->dhd->busstate == DHD_BUS_DOWN) {
6060 		DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
6061 		bus->intstatus = 0;
6062 		DHD_GENERAL_UNLOCK(bus->dhd, flags);
6063 		bus->dpc_return_busdown_count++;
6064 		return 0;
6065 	}
6066 #ifdef DHD_PCIE_RUNTIMEPM
6067 	bus->idlecount = 0;
6068 #endif /* DHD_PCIE_RUNTIMEPM */
6069 	DHD_BUS_BUSY_SET_IN_DPC(bus->dhd);
6070 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
6071 
6072 #ifdef DHD_READ_INTSTATUS_IN_DPC
6073 	if (bus->ipend) {
6074 		bus->ipend = FALSE;
6075 		bus->intstatus = dhdpcie_bus_intstatus(bus);
6076 		/* Check if the interrupt is ours or not */
6077 		if (bus->intstatus == 0) {
6078 			goto INTR_ON;
6079 		}
6080 		bus->intrcount++;
6081 	}
6082 #endif /* DHD_READ_INTSTATUS_IN_DPC */
6083 
6084 	resched = dhdpcie_bus_process_mailbox_intr(bus, bus->intstatus);
6085 	if (!resched) {
6086 		bus->intstatus = 0;
6087 #ifdef DHD_READ_INTSTATUS_IN_DPC
6088 INTR_ON:
6089 #endif /* DHD_READ_INTSTATUS_IN_DPC */
6090 		bus->dpc_intr_enable_count++;
6091 		dhdpcie_bus_intr_enable(bus); /* Enable back interrupt using Intmask!! */
6092 	}
6093 
6094 	DHD_GENERAL_LOCK(bus->dhd, flags);
6095 	DHD_BUS_BUSY_CLEAR_IN_DPC(bus->dhd);
6096 	dhd_os_busbusy_wake(bus->dhd);
6097 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
6098 
6099 	return resched;
6100 
6101 }
6102 
6103 
6104 int
dhdpcie_send_mb_data(dhd_bus_t * bus,uint32 h2d_mb_data)6105 dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
6106 {
6107 	uint32 cur_h2d_mb_data = 0;
6108 	unsigned long flags;
6109 
6110 	DHD_INFO_HW4(("%s: H2D_MB_DATA: 0x%08X\n", __FUNCTION__, h2d_mb_data));
6111 
6112 	if (bus->is_linkdown) {
6113 		DHD_ERROR(("%s: PCIe link was down\n", __FUNCTION__));
6114 		return BCME_ERROR;
6115 	}
6116 
6117 	DHD_GENERAL_LOCK(bus->dhd, flags);
6118 
6119 	if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6 && !bus->use_mailbox) {
6120 		DHD_INFO(("API rev is 6, sending mb data as H2D Ctrl message to dongle, 0x%04x\n",
6121 			h2d_mb_data));
6122 		/* Prevent asserting device_wake during doorbell ring for mb data to avoid loop. */
6123 #ifdef PCIE_OOB
6124 		bus->oob_enabled = FALSE;
6125 #endif /* PCIE_OOB */
6126 		if (dhd_prot_h2d_mbdata_send_ctrlmsg(bus->dhd, h2d_mb_data)) {
6127 			DHD_ERROR(("failure sending the H2D Mailbox message to firmware\n"));
6128 			goto fail;
6129 		}
6130 #ifdef PCIE_OOB
6131 		bus->oob_enabled = TRUE;
6132 #endif /* PCIE_OOB */
6133 		goto done;
6134 	}
6135 
6136 	dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
6137 
6138 	if (cur_h2d_mb_data != 0) {
6139 		uint32 i = 0;
6140 		DHD_INFO(("%s: GRRRRRRR: MB transaction is already pending 0x%04x\n", __FUNCTION__, cur_h2d_mb_data));
6141 		while ((i++ < 100) && cur_h2d_mb_data) {
6142 			OSL_DELAY(10);
6143 			dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, H2D_MB_DATA, 0);
6144 		}
6145 		if (i >= 100) {
6146 			DHD_ERROR(("%s : waited 1ms for the dngl "
6147 				"to ack the previous mb transaction\n", __FUNCTION__));
6148 			DHD_ERROR(("%s : MB transaction is still pending 0x%04x\n",
6149 				__FUNCTION__, cur_h2d_mb_data));
6150 		}
6151 	}
6152 
6153 	dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), H2D_MB_DATA, 0);
6154 	dhd_bus_gen_devmb_intr(bus);
6155 
6156 done:
6157 	if (h2d_mb_data == H2D_HOST_D3_INFORM) {
6158 		DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
6159 		/* Mark D3_INFORM in the atomic context to
6160 		 * skip ringing H2D DB after D3_INFORM
6161 		 */
6162 		bus->d3_suspend_pending = TRUE;
6163 		bus->d3_inform_cnt++;
6164 	}
6165 	if (h2d_mb_data == H2D_HOST_D0_INFORM_IN_USE) {
6166 		DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM_IN_USE to dongle\n", __FUNCTION__));
6167 		bus->d0_inform_in_use_cnt++;
6168 	}
6169 	if (h2d_mb_data == H2D_HOST_D0_INFORM) {
6170 		DHD_INFO_HW4(("%s: send H2D_HOST_D0_INFORM to dongle\n", __FUNCTION__));
6171 		bus->d0_inform_cnt++;
6172 	}
6173 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
6174 	return BCME_OK;
6175 
6176 fail:
6177 	DHD_GENERAL_UNLOCK(bus->dhd, flags);
6178 	return BCME_ERROR;
6179 }
6180 
6181 void
dhd_bus_handle_mb_data(dhd_bus_t * bus,uint32 d2h_mb_data)6182 dhd_bus_handle_mb_data(dhd_bus_t *bus, uint32 d2h_mb_data)
6183 {
6184 #ifdef PCIE_INB_DW
6185 	unsigned long flags = 0;
6186 #endif
6187 	DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
6188 
6189 	if (d2h_mb_data & D2H_DEV_FWHALT)  {
6190 		DHD_ERROR(("FW trap has happened\n"));
6191 		dhdpcie_checkdied(bus, NULL, 0);
6192 #ifdef SUPPORT_LINKDOWN_RECOVERY
6193 #ifdef CONFIG_ARCH_MSM
6194 		bus->no_cfg_restore = 1;
6195 #endif /* CONFIG_ARCH_MSM */
6196 #endif /* SUPPORT_LINKDOWN_RECOVERY */
6197 		dhd_os_check_hang(bus->dhd, 0, -EREMOTEIO);
6198 		return;
6199 	}
6200 	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ)  {
6201 		if ((bus->dhd->busstate == DHD_BUS_SUSPEND || bus->d3_suspend_pending) &&
6202 			bus->wait_for_d3_ack) {
6203 			DHD_ERROR(("DS-ENTRY AFTER D3-ACK!!!!! QUITING\n"));
6204 			bus->dhd->busstate = DHD_BUS_DOWN;
6205 			return;
6206 		}
6207 		/* what should we do */
6208 		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
6209 #ifdef PCIE_INB_DW
6210 		if (INBAND_DW_ENAB(bus)) {
6211 			DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
6212 			if (dhdpcie_bus_get_pcie_inband_dw_state(bus) == DW_DEVICE_DS_ACTIVE) {
6213 				dhdpcie_bus_set_pcie_inband_dw_state(bus,
6214 						DW_DEVICE_DS_DEV_SLEEP_PEND);
6215 				if (bus->host_active_cnt == 0) {
6216 					dhdpcie_bus_set_pcie_inband_dw_state(bus,
6217 						DW_DEVICE_DS_DEV_SLEEP);
6218 					dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
6219 				}
6220 			}
6221 			DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
6222 			dhd_os_ds_enter_wake(bus->dhd);
6223 		} else
6224 #endif /* PCIE_INB_DW */
6225 		{
6226 			dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
6227 		}
6228 		if (IDMA_DS_ENAB(bus->dhd)) {
6229 			bus->dongle_in_ds = TRUE;
6230 		}
6231 		DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
6232 	}
6233 	if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE)  {
6234 		/* what should we do */
6235 		bus->dongle_in_ds = FALSE;
6236 		DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
6237 #ifdef PCIE_INB_DW
6238 		if (INBAND_DW_ENAB(bus)) {
6239 			bus->inband_ds_exit_device_cnt++;
6240 			DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
6241 			if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
6242 					DW_DEVICE_DS_DISABLED_WAIT) {
6243 				/* wake up only if some one is waiting in
6244 				* DW_DEVICE_DS_DISABLED_WAIT state
6245 				* in this case the waiter will change the state
6246 				* to DW_DEVICE_DS_DEV_WAKE
6247 				*/
6248 				bus->wait_for_ds_exit = 1;
6249 				DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
6250 				dhd_os_ds_exit_wake(bus->dhd);
6251 			} else {
6252 				DHD_INFO(("D2H_MB_DATA: not in DW_DEVICE_DS_DISABLED_WAIT!\n"));
6253 				/*
6254 				* If there is no one waiting, then update the state from here
6255 				*/
6256 				bus->wait_for_ds_exit = 1;
6257 				dhdpcie_bus_set_pcie_inband_dw_state(bus,
6258 					DW_DEVICE_DS_DEV_WAKE);
6259 				DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
6260 			}
6261 		}
6262 #endif /* PCIE_INB_DW */
6263 	}
6264 	if (d2h_mb_data & D2HMB_DS_HOST_SLEEP_EXIT_ACK)  {
6265 		/* what should we do */
6266 		DHD_INFO(("D2H_MB_DATA: D0 ACK\n"));
6267 #ifdef PCIE_INB_DW
6268 		if (INBAND_DW_ENAB(bus)) {
6269 			DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
6270 			if (dhdpcie_bus_get_pcie_inband_dw_state(bus) ==
6271 				DW_DEVICE_HOST_WAKE_WAIT) {
6272 				dhdpcie_bus_set_pcie_inband_dw_state(bus, DW_DEVICE_DS_ACTIVE);
6273 			}
6274 			DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
6275 		}
6276 #endif /* PCIE_INB_DW */
6277 	}
6278 	if (d2h_mb_data & D2H_DEV_D3_ACK)  {
6279 		/* what should we do */
6280 		DHD_INFO_HW4(("D2H_MB_DATA: D3 ACK\n"));
6281 		if (!bus->wait_for_d3_ack) {
6282 			/* Disable dongle Interrupts Immediately after D3 */
6283 			bus->suspend_intr_disable_count++;
6284 			dhdpcie_bus_intr_disable(bus);
6285 #if defined(DHD_HANG_SEND_UP_TEST)
6286 			if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
6287 				DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
6288 			} else {
6289 				bus->wait_for_d3_ack = 1;
6290 				dhd_os_d3ack_wake(bus->dhd);
6291 			}
6292 #else /* DHD_HANG_SEND_UP_TEST */
6293 			bus->wait_for_d3_ack = 1;
6294 			dhd_os_d3ack_wake(bus->dhd);
6295 #endif /* DHD_HANG_SEND_UP_TEST */
6296 		}
6297 	}
6298 }
6299 
6300 static void
dhdpcie_handle_mb_data(dhd_bus_t * bus)6301 dhdpcie_handle_mb_data(dhd_bus_t *bus)
6302 {
6303 	uint32 d2h_mb_data = 0;
6304 	uint32 zero = 0;
6305 	dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
6306 	if (D2H_DEV_MB_INVALIDATED(d2h_mb_data)) {
6307 		DHD_ERROR(("%s: Invalid D2H_MB_DATA: 0x%08x\n",
6308 			__FUNCTION__, d2h_mb_data));
6309 		return;
6310 	}
6311 
6312 	dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
6313 
6314 	DHD_INFO_HW4(("%s: D2H_MB_DATA: 0x%04x\n", __FUNCTION__, d2h_mb_data));
6315 	if (d2h_mb_data & D2H_DEV_FWHALT)  {
6316 		DHD_ERROR(("FW trap has happened\n"));
6317 		dhdpcie_checkdied(bus, NULL, 0);
6318 		/* not ready yet dhd_os_ind_firmware_stall(bus->dhd); */
6319 		return;
6320 	}
6321 	if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ)  {
6322 		/* what should we do */
6323 		DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP REQ\n", __FUNCTION__));
6324 		dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
6325 		if (IDMA_DS_ENAB(bus->dhd)) {
6326 			bus->dongle_in_ds = TRUE;
6327 		}
6328 		DHD_INFO(("%s: D2H_MB_DATA: sent DEEP SLEEP ACK\n", __FUNCTION__));
6329 	}
6330 	if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE)  {
6331 		/* what should we do */
6332 		DHD_INFO(("%s: D2H_MB_DATA: DEEP SLEEP EXIT\n", __FUNCTION__));
6333 		bus->dongle_in_ds = FALSE;
6334 	}
6335 	if (d2h_mb_data & D2H_DEV_D3_ACK)  {
6336 		/* what should we do */
6337 		DHD_INFO_HW4(("%s: D2H_MB_DATA: D3 ACK\n", __FUNCTION__));
6338 		if (!bus->wait_for_d3_ack) {
6339 #if defined(DHD_HANG_SEND_UP_TEST)
6340 			if (bus->dhd->req_hang_type == HANG_REASON_D3_ACK_TIMEOUT) {
6341 				DHD_ERROR(("TEST HANG: Skip to process D3 ACK\n"));
6342 			} else {
6343 				bus->wait_for_d3_ack = 1;
6344 				dhd_os_d3ack_wake(bus->dhd);
6345 			}
6346 #else /* DHD_HANG_SEND_UP_TEST */
6347 			bus->wait_for_d3_ack = 1;
6348 			dhd_os_d3ack_wake(bus->dhd);
6349 #endif /* DHD_HANG_SEND_UP_TEST */
6350 		}
6351 	}
6352 }
6353 
6354 static void
dhdpcie_read_handle_mb_data(dhd_bus_t * bus)6355 dhdpcie_read_handle_mb_data(dhd_bus_t *bus)
6356 {
6357 	uint32 d2h_mb_data = 0;
6358 	uint32 zero = 0;
6359 
6360 	dhd_bus_cmn_readshared(bus, &d2h_mb_data, D2H_MB_DATA, 0);
6361 	if (!d2h_mb_data)
6362 		return;
6363 
6364 	dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), D2H_MB_DATA, 0);
6365 
6366 	dhd_bus_handle_mb_data(bus, d2h_mb_data);
6367 }
6368 
6369 static bool
dhdpcie_bus_process_mailbox_intr(dhd_bus_t * bus,uint32 intstatus)6370 dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
6371 {
6372 	bool resched = FALSE;
6373 
6374 	if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
6375 		(bus->sih->buscorerev == 4)) {
6376 		/* Msg stream interrupt */
6377 		if (intstatus & I_BIT1) {
6378 			resched = dhdpci_bus_read_frames(bus);
6379 		} else if (intstatus & I_BIT0) {
6380 			/* do nothing for Now */
6381 		}
6382 	} else {
6383 		if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
6384 			bus->api.handle_mb_data(bus);
6385 
6386 		if (bus->dhd->busstate == DHD_BUS_SUSPEND) {
6387 			goto exit;
6388 		}
6389 
6390 		if (intstatus & PCIE_MB_D2H_MB_MASK) {
6391 			resched = dhdpci_bus_read_frames(bus);
6392 		}
6393 	}
6394 
6395 exit:
6396 	return resched;
6397 }
6398 
6399 static bool
dhdpci_bus_read_frames(dhd_bus_t * bus)6400 dhdpci_bus_read_frames(dhd_bus_t *bus)
6401 {
6402 	bool more = FALSE;
6403 
6404 	/* First check if there a FW trap */
6405 	if ((bus->api.fw_rev >= PCIE_SHARED_VERSION_6) &&
6406 		(bus->dhd->dongle_trap_data = dhd_prot_process_trapbuf(bus->dhd))) {
6407 		dhd_bus_handle_mb_data(bus, D2H_DEV_FWHALT);
6408 		return FALSE;
6409 	}
6410 
6411 	/* There may be frames in both ctrl buf and data buf; check ctrl buf first */
6412 	DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
6413 
6414 	dhd_prot_process_ctrlbuf(bus->dhd);
6415 	/* Unlock to give chance for resp to be handled */
6416 	DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
6417 
6418 	DHD_PERIM_LOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
6419 	/* update the flow ring cpls */
6420 	dhd_update_txflowrings(bus->dhd);
6421 
6422 	/* With heavy TX traffic, we could get a lot of TxStatus
6423 	 * so add bound
6424 	 */
6425 	more |= dhd_prot_process_msgbuf_txcpl(bus->dhd, dhd_txbound);
6426 
6427 	/* With heavy RX traffic, this routine potentially could spend some time
6428 	 * processing RX frames without RX bound
6429 	 */
6430 	more |= dhd_prot_process_msgbuf_rxcpl(bus->dhd, dhd_rxbound);
6431 
6432 	/* Process info ring completion messages */
6433 	more |= dhd_prot_process_msgbuf_infocpl(bus->dhd, DHD_INFORING_BOUND);
6434 
6435 #ifdef IDLE_TX_FLOW_MGMT
6436 	if (bus->enable_idle_flowring_mgmt) {
6437 		/* Look for idle flow rings */
6438 		dhd_bus_check_idle_scan(bus);
6439 	}
6440 #endif /* IDLE_TX_FLOW_MGMT */
6441 
6442 	/* don't talk to the dongle if fw is about to be reloaded */
6443 	if (bus->dhd->hang_was_sent) {
6444 		more = FALSE;
6445 	}
6446 	DHD_PERIM_UNLOCK_ALL((bus->dhd->fwder_unit % FWDER_MAX_UNIT));
6447 
6448 #ifdef SUPPORT_LINKDOWN_RECOVERY
6449 	if (bus->read_shm_fail) {
6450 		/* Read interrupt state once again to confirm linkdown */
6451 		int intstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
6452 		if (intstatus != (uint32)-1) {
6453 			DHD_ERROR(("%s: read SHM failed but intstatus is valid\n", __FUNCTION__));
6454 #ifdef DHD_FW_COREDUMP
6455 			if (bus->dhd->memdump_enabled) {
6456 				DHD_OS_WAKE_LOCK(bus->dhd);
6457 				bus->dhd->memdump_type = DUMP_TYPE_READ_SHM_FAIL;
6458 				dhd_bus_mem_dump(bus->dhd);
6459 				DHD_OS_WAKE_UNLOCK(bus->dhd);
6460 			}
6461 #endif /* DHD_FW_COREDUMP */
6462 			bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
6463 			dhd_os_send_hang_message(bus->dhd);
6464 		} else {
6465 			DHD_ERROR(("%s: Link is Down.\n", __FUNCTION__));
6466 #ifdef CONFIG_ARCH_MSM
6467 			bus->no_cfg_restore = 1;
6468 #endif /* CONFIG_ARCH_MSM */
6469 			bus->is_linkdown = 1;
6470 			bus->dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
6471 			dhd_os_send_hang_message(bus->dhd);
6472 		}
6473 	}
6474 #endif /* SUPPORT_LINKDOWN_RECOVERY */
6475 	return more;
6476 }
6477 
6478 bool
dhdpcie_tcm_valid(dhd_bus_t * bus)6479 dhdpcie_tcm_valid(dhd_bus_t *bus)
6480 {
6481 	uint32 addr = 0;
6482 	int rv;
6483 	uint32 shaddr = 0;
6484 	pciedev_shared_t sh;
6485 
6486 	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
6487 
6488 	/* Read last word in memory to determine address of pciedev_shared structure */
6489 	addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
6490 
6491 	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
6492 		(addr > shaddr)) {
6493 		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid addr\n",
6494 			__FUNCTION__, addr));
6495 		return FALSE;
6496 	}
6497 
6498 	/* Read hndrte_shared structure */
6499 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&sh,
6500 		sizeof(pciedev_shared_t))) < 0) {
6501 		DHD_ERROR(("Failed to read PCIe shared struct with %d\n", rv));
6502 		return FALSE;
6503 	}
6504 
6505 	/* Compare any field in pciedev_shared_t */
6506 	if (sh.console_addr != bus->pcie_sh->console_addr) {
6507 		DHD_ERROR(("Contents of pciedev_shared_t structure are not matching.\n"));
6508 		return FALSE;
6509 	}
6510 
6511 	return TRUE;
6512 }
6513 
6514 static void
dhdpcie_update_bus_api_revisions(uint32 firmware_api_version,uint32 host_api_version)6515 dhdpcie_update_bus_api_revisions(uint32 firmware_api_version, uint32 host_api_version)
6516 {
6517 	snprintf(bus_api_revision, BUS_API_REV_STR_LEN, "\nBus API revisions:(FW rev%d)(DHD rev%d)",
6518 			firmware_api_version, host_api_version);
6519 	return;
6520 }
6521 
6522 static bool
dhdpcie_check_firmware_compatible(uint32 firmware_api_version,uint32 host_api_version)6523 dhdpcie_check_firmware_compatible(uint32 firmware_api_version, uint32 host_api_version)
6524 {
6525 	bool retcode = FALSE;
6526 
6527 	DHD_INFO(("firmware api revision %d, host api revision %d\n",
6528 		firmware_api_version, host_api_version));
6529 
6530 	switch (firmware_api_version) {
6531 	case PCIE_SHARED_VERSION_7:
6532 	case PCIE_SHARED_VERSION_6:
6533 	case PCIE_SHARED_VERSION_5:
6534 		retcode = TRUE;
6535 		break;
6536 	default:
6537 		if (firmware_api_version <= host_api_version)
6538 			retcode = TRUE;
6539 	}
6540 	return retcode;
6541 }
6542 
6543 static int
dhdpcie_readshared(dhd_bus_t * bus)6544 dhdpcie_readshared(dhd_bus_t *bus)
6545 {
6546 	uint32 addr = 0;
6547 	int rv, dma_indx_wr_buf, dma_indx_rd_buf;
6548 	uint32 shaddr = 0;
6549 	pciedev_shared_t *sh = bus->pcie_sh;
6550 	dhd_timeout_t tmo;
6551 
6552 	shaddr = bus->dongle_ram_base + bus->ramsize - 4;
6553 	/* start a timer for 5 seconds */
6554 	dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);
6555 
6556 	while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
6557 		/* Read last word in memory to determine address of pciedev_shared structure */
6558 		addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
6559 	}
6560 
6561 	if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
6562 		(addr > shaddr)) {
6563 		DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
6564 			__FUNCTION__, addr));
6565 		DHD_ERROR(("%s: Waited %u usec, dongle is not ready\n", __FUNCTION__, tmo.elapsed));
6566 		return BCME_ERROR;
6567 	} else {
6568 		bus->shared_addr = (ulong)addr;
6569 		DHD_ERROR(("%s: PCIe shared addr (0x%08x) read took %u usec "
6570 			"before dongle is ready\n", __FUNCTION__, addr, tmo.elapsed));
6571 	}
6572 
6573 	/* Read hndrte_shared structure */
6574 	if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
6575 		sizeof(pciedev_shared_t))) < 0) {
6576 		DHD_ERROR(("%s: Failed to read PCIe shared struct with %d\n", __FUNCTION__, rv));
6577 		return rv;
6578 	}
6579 
6580 	/* Endianness */
6581 	sh->flags = ltoh32(sh->flags);
6582 	sh->trap_addr = ltoh32(sh->trap_addr);
6583 	sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
6584 	sh->assert_file_addr = ltoh32(sh->assert_file_addr);
6585 	sh->assert_line = ltoh32(sh->assert_line);
6586 	sh->console_addr = ltoh32(sh->console_addr);
6587 	sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
6588 	sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
6589 	sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
6590 	sh->flags2 = ltoh32(sh->flags2);
6591 
6592 	/* load bus console address */
6593 	bus->console_addr = sh->console_addr;
6594 
6595 	/* Read the dma rx offset */
6596 	bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
6597 	dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);
6598 
6599 	DHD_INFO(("%s: DMA RX offset from shared Area %d\n", __FUNCTION__, bus->dma_rxoffset));
6600 
6601 	bus->api.fw_rev = sh->flags & PCIE_SHARED_VERSION_MASK;
6602 	if (!(dhdpcie_check_firmware_compatible(bus->api.fw_rev, PCIE_SHARED_VERSION)))
6603 	{
6604 		DHD_ERROR(("%s: pcie_shared version %d in dhd "
6605 		           "is older than pciedev_shared version %d in dongle\n",
6606 		           __FUNCTION__, PCIE_SHARED_VERSION,
6607 		           bus->api.fw_rev));
6608 		return BCME_ERROR;
6609 	}
6610 	dhdpcie_update_bus_api_revisions(bus->api.fw_rev, PCIE_SHARED_VERSION);
6611 
6612 	bus->rw_index_sz = (sh->flags & PCIE_SHARED_2BYTE_INDICES) ?
6613 		sizeof(uint16) : sizeof(uint32);
6614 	DHD_INFO(("%s: Dongle advertizes %d size indices\n",
6615 		__FUNCTION__, bus->rw_index_sz));
6616 
6617 #ifdef IDLE_TX_FLOW_MGMT
6618 	if (sh->flags & PCIE_SHARED_IDLE_FLOW_RING) {
6619 		DHD_ERROR(("%s: FW Supports IdleFlow ring managment!\n",
6620 			__FUNCTION__));
6621 		bus->enable_idle_flowring_mgmt = TRUE;
6622 	}
6623 #endif /* IDLE_TX_FLOW_MGMT */
6624 
6625 	bus->dhd->idma_enable = (sh->flags & PCIE_SHARED_IDMA) ? TRUE : FALSE;
6626 	bus->dhd->ifrm_enable = (sh->flags & PCIE_SHARED_IFRM) ? TRUE : FALSE;
6627 
6628 	bus->dhd->idma_retention_ds = (sh->flags & PCIE_SHARED_IDMA_RETENTION_DS) ? TRUE : FALSE;
6629 
6630 	bus->dhd->d2h_sync_mode = sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK;
6631 
6632 	/* Does the FW support DMA'ing r/w indices */
6633 	if (sh->flags & PCIE_SHARED_DMA_INDEX) {
6634 		if (!bus->dhd->dma_ring_upd_overwrite) {
6635 			{
6636 				if (!IFRM_ENAB(bus->dhd)) {
6637 					bus->dhd->dma_h2d_ring_upd_support = TRUE;
6638 				}
6639 				bus->dhd->dma_d2h_ring_upd_support = TRUE;
6640 			}
6641 		}
6642 
6643 		if (bus->dhd->dma_d2h_ring_upd_support)
6644 			bus->dhd->d2h_sync_mode = 0;
6645 
6646 		DHD_INFO(("%s: Host support DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
6647 			__FUNCTION__,
6648 			(bus->dhd->dma_h2d_ring_upd_support ? 1 : 0),
6649 			(bus->dhd->dma_d2h_ring_upd_support ? 1 : 0)));
6650 	} else if (!(sh->flags & PCIE_SHARED_D2H_SYNC_MODE_MASK)) {
6651 		DHD_ERROR(("%s FW has to support either dma indices or d2h sync\n",
6652 			__FUNCTION__));
6653 		return BCME_UNSUPPORTED;
6654 	} else {
6655 		bus->dhd->dma_h2d_ring_upd_support = FALSE;
6656 		bus->dhd->dma_d2h_ring_upd_support = FALSE;
6657 	}
6658 
6659 	/* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
6660 	{
6661 		ring_info_t  ring_info;
6662 
6663 		if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
6664 			(uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
6665 			return rv;
6666 
6667 		bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
6668 		bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);
6669 
6670 
6671 		if (bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
6672 			bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
6673 			bus->max_submission_rings = ltoh16(ring_info.max_submission_queues);
6674 			bus->max_completion_rings = ltoh16(ring_info.max_completion_rings);
6675 			bus->max_cmn_rings = bus->max_submission_rings - bus->max_tx_flowrings;
6676 			bus->api.handle_mb_data = dhdpcie_read_handle_mb_data;
6677 			bus->use_mailbox = sh->flags & PCIE_SHARED_USE_MAILBOX;
6678 		}
6679 		else {
6680 			bus->max_tx_flowrings = ltoh16(ring_info.max_tx_flowrings);
6681 			bus->max_submission_rings = bus->max_tx_flowrings;
6682 			bus->max_completion_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
6683 			bus->max_cmn_rings = BCMPCIE_H2D_COMMON_MSGRINGS;
6684 			bus->api.handle_mb_data = dhdpcie_handle_mb_data;
6685 		}
6686 		if (bus->max_completion_rings == 0) {
6687 			DHD_ERROR(("dongle completion rings are invalid %d\n",
6688 				bus->max_completion_rings));
6689 			return BCME_ERROR;
6690 		}
6691 		if (bus->max_submission_rings == 0) {
6692 			DHD_ERROR(("dongle submission rings are invalid %d\n",
6693 				bus->max_submission_rings));
6694 			return BCME_ERROR;
6695 		}
6696 		if (bus->max_tx_flowrings == 0) {
6697 			DHD_ERROR(("dongle txflow rings are invalid %d\n", bus->max_tx_flowrings));
6698 			return BCME_ERROR;
6699 		}
6700 
6701 		/* If both FW and Host support DMA'ing indices, allocate memory and notify FW
6702 		 * The max_sub_queues is read from FW initialized ring_info
6703 		 */
6704 		if (bus->dhd->dma_h2d_ring_upd_support || IDMA_ENAB(bus->dhd)) {
6705 			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
6706 				H2D_DMA_INDX_WR_BUF, bus->max_submission_rings);
6707 			dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
6708 				D2H_DMA_INDX_RD_BUF, bus->max_completion_rings);
6709 
6710 			if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
6711 				DHD_ERROR(("%s: Failed to allocate memory for dma'ing h2d indices"
6712 						"Host will use w/r indices in TCM\n",
6713 						__FUNCTION__));
6714 				bus->dhd->dma_h2d_ring_upd_support = FALSE;
6715 				bus->dhd->idma_enable = FALSE;
6716 			}
6717 		}
6718 
6719 		if (bus->dhd->dma_d2h_ring_upd_support) {
6720 			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
6721 				D2H_DMA_INDX_WR_BUF, bus->max_completion_rings);
6722 			dma_indx_rd_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
6723 				H2D_DMA_INDX_RD_BUF, bus->max_submission_rings);
6724 
6725 			if ((dma_indx_wr_buf != BCME_OK) || (dma_indx_rd_buf != BCME_OK)) {
6726 				DHD_ERROR(("%s: Failed to allocate memory for dma'ing d2h indices"
6727 						"Host will use w/r indices in TCM\n",
6728 						__FUNCTION__));
6729 				bus->dhd->dma_d2h_ring_upd_support = FALSE;
6730 			}
6731 		}
6732 
6733 		if (IFRM_ENAB(bus->dhd)) {
6734 			dma_indx_wr_buf = dhd_prot_dma_indx_init(bus->dhd, bus->rw_index_sz,
6735 				H2D_IFRM_INDX_WR_BUF, bus->max_tx_flowrings);
6736 
6737 			if (dma_indx_wr_buf != BCME_OK) {
6738 				DHD_ERROR(("%s: Failed to alloc memory for Implicit DMA\n",
6739 						__FUNCTION__));
6740 				bus->dhd->ifrm_enable = FALSE;
6741 			}
6742 		}
6743 
6744 		/* read ringmem and ringstate ptrs from shared area and store in host variables */
6745 		dhd_fillup_ring_sharedptr_info(bus, &ring_info);
6746 		if (dhd_msg_level & DHD_INFO_VAL) {
6747 			bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
6748 		}
6749 		DHD_INFO(("%s: ring_info\n", __FUNCTION__));
6750 
6751 		DHD_ERROR(("%s: max H2D queues %d\n",
6752 			__FUNCTION__, ltoh16(ring_info.max_tx_flowrings)));
6753 
6754 		DHD_INFO(("mail box address\n"));
6755 		DHD_INFO(("%s: h2d_mb_data_ptr_addr 0x%04x\n",
6756 			__FUNCTION__, bus->h2d_mb_data_ptr_addr));
6757 		DHD_INFO(("%s: d2h_mb_data_ptr_addr 0x%04x\n",
6758 			__FUNCTION__, bus->d2h_mb_data_ptr_addr));
6759 	}
6760 
6761 	DHD_INFO(("%s: d2h_sync_mode 0x%08x\n",
6762 		__FUNCTION__, bus->dhd->d2h_sync_mode));
6763 
6764 	bus->dhd->d2h_hostrdy_supported =
6765 		((sh->flags & PCIE_SHARED_HOSTRDY_SUPPORT) == PCIE_SHARED_HOSTRDY_SUPPORT);
6766 
6767 #ifdef PCIE_OOB
6768 	bus->dhd->d2h_no_oob_dw = (sh->flags & PCIE_SHARED_NO_OOB_DW) ? TRUE : FALSE;
6769 #endif /* PCIE_OOB */
6770 
6771 #ifdef PCIE_INB_DW
6772 	bus->dhd->d2h_inband_dw = (sh->flags & PCIE_SHARED_INBAND_DS) ? TRUE : FALSE;
6773 #endif /* PCIE_INB_DW */
6774 
6775 #if defined(PCIE_OOB) && defined(PCIE_INB_DW)
6776 	DHD_ERROR(("FW supports Inband dw ? %s oob dw ? %s\n",
6777 		bus->dhd->d2h_inband_dw ? "Y":"N",
6778 		bus->dhd->d2h_no_oob_dw ? "N":"Y"));
6779 #endif /* defined(PCIE_OOB) && defined(PCIE_INB_DW) */
6780 
6781 	bus->dhd->ext_trap_data_supported =
6782 		((sh->flags2 & PCIE_SHARED2_EXTENDED_TRAP_DATA) == PCIE_SHARED2_EXTENDED_TRAP_DATA);
6783 
6784 	return BCME_OK;
6785 } /* dhdpcie_readshared */
6786 
6787 /** Read ring mem and ring state ptr info from shared memory area in device memory */
6788 static void
dhd_fillup_ring_sharedptr_info(dhd_bus_t * bus,ring_info_t * ring_info)6789 dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info)
6790 {
6791 	uint16 i = 0;
6792 	uint16 j = 0;
6793 	uint32 tcm_memloc;
6794 	uint32	d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;
6795 	uint16  max_tx_flowrings = bus->max_tx_flowrings;
6796 
6797 	/* Ring mem ptr info */
6798 	/* Alloated in the order
6799 		H2D_MSGRING_CONTROL_SUBMIT              0
6800 		H2D_MSGRING_RXPOST_SUBMIT               1
6801 		D2H_MSGRING_CONTROL_COMPLETE            2
6802 		D2H_MSGRING_TX_COMPLETE                 3
6803 		D2H_MSGRING_RX_COMPLETE                 4
6804 	*/
6805 
6806 	{
6807 		/* ringmemptr holds start of the mem block address space */
6808 		tcm_memloc = ltoh32(ring_info->ringmem_ptr);
6809 
6810 		/* Find out ringmem ptr for each ring common  ring */
6811 		for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) {
6812 			bus->ring_sh[i].ring_mem_addr = tcm_memloc;
6813 			/* Update mem block */
6814 			tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
6815 			DHD_INFO(("%s: ring id %d ring mem addr 0x%04x \n", __FUNCTION__,
6816 				i, bus->ring_sh[i].ring_mem_addr));
6817 		}
6818 	}
6819 
6820 	/* Ring state mem ptr info */
6821 	{
6822 		d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr);
6823 		d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
6824 		h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
6825 		h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);
6826 
6827 		/* Store h2d common ring write/read pointers */
6828 		for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
6829 			bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
6830 			bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
6831 
6832 			/* update mem block */
6833 			h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
6834 			h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
6835 
6836 			DHD_INFO(("%s: h2d w/r : idx %d write %x read %x \n", __FUNCTION__, i,
6837 				bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
6838 		}
6839 
6840 		/* Store d2h common ring write/read pointers */
6841 		for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
6842 			bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
6843 			bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
6844 
6845 			/* update mem block */
6846 			d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
6847 			d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
6848 
6849 			DHD_INFO(("%s: d2h w/r : idx %d write %x read %x \n", __FUNCTION__, i,
6850 				bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
6851 		}
6852 
6853 		/* Store txflow ring write/read pointers */
6854 		if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
6855 			max_tx_flowrings -= BCMPCIE_H2D_COMMON_MSGRINGS;
6856 		} else {
6857 			/* Account for Debug info h2d ring located after the last tx flow ring */
6858 			max_tx_flowrings = max_tx_flowrings + 1;
6859 		}
6860 		for (j = 0; j < max_tx_flowrings; i++, j++)
6861 		{
6862 			bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
6863 			bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
6864 
6865 			/* update mem block */
6866 			h2d_w_idx_ptr = h2d_w_idx_ptr + bus->rw_index_sz;
6867 			h2d_r_idx_ptr = h2d_r_idx_ptr + bus->rw_index_sz;
6868 
6869 			DHD_INFO(("%s: FLOW Rings h2d w/r : idx %d write %x read %x \n",
6870 				__FUNCTION__, i,
6871 				bus->ring_sh[i].ring_state_w,
6872 				bus->ring_sh[i].ring_state_r));
6873 		}
6874 		/* store wr/rd pointers for  debug info completion ring */
6875 		bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
6876 		bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
6877 		d2h_w_idx_ptr = d2h_w_idx_ptr + bus->rw_index_sz;
6878 		d2h_r_idx_ptr = d2h_r_idx_ptr + bus->rw_index_sz;
6879 		DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
6880 			bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
6881 	}
6882 } /* dhd_fillup_ring_sharedptr_info */
6883 
6884 /**
6885  * Initialize bus module: prepare for communication with the dongle. Called after downloading
6886  * firmware into the dongle.
6887  */
dhd_bus_init(dhd_pub_t * dhdp,bool enforce_mutex)6888 int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
6889 {
6890 	dhd_bus_t *bus = dhdp->bus;
6891 	int  ret = 0;
6892 
6893 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
6894 
6895 	ASSERT(bus->dhd);
6896 	if (!bus->dhd)
6897 		return 0;
6898 
6899 	/* Make sure we're talking to the core. */
6900 	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
6901 	ASSERT(bus->reg != NULL);
6902 
6903 	/* before opening up bus for data transfer, check if shared are is intact */
6904 	ret = dhdpcie_readshared(bus);
6905 	if (ret < 0) {
6906 		DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
6907 		return ret;
6908 	}
6909 
6910 	/* Make sure we're talking to the core. */
6911 	bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
6912 	ASSERT(bus->reg != NULL);
6913 
6914 	/* Set bus state according to enable result */
6915 	dhdp->busstate = DHD_BUS_DATA;
6916 	bus->d3_suspend_pending = FALSE;
6917 
6918 #if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
6919 	if (bus->pcie_sh->flags2 & PCIE_SHARED_D2H_D11_TX_STATUS) {
6920 		uint32 flags2 = bus->pcie_sh->flags2;
6921 		uint32 addr;
6922 
6923 		addr = bus->shared_addr + OFFSETOF(pciedev_shared_t, flags2);
6924 		flags2 |= PCIE_SHARED_H2D_D11_TX_STATUS;
6925 		ret = dhdpcie_bus_membytes(bus, TRUE, addr,
6926 			(uint8 *)&flags2, sizeof(flags2));
6927 		if (ret < 0) {
6928 			DHD_ERROR(("%s: update flag bit (H2D_D11_TX_STATUS) failed\n",
6929 				__FUNCTION__));
6930 			return ret;
6931 		}
6932 		bus->pcie_sh->flags2 = flags2;
6933 		bus->dhd->d11_tx_status = TRUE;
6934 	}
6935 #endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
6936 
6937 	if (!dhd_download_fw_on_driverload)
6938 		dhd_dpc_enable(bus->dhd);
6939 	/* Enable the interrupt after device is up */
6940 	dhdpcie_bus_intr_enable(bus);
6941 
6942 	/* bcmsdh_intr_unmask(bus->sdh); */
6943 #ifdef DHD_PCIE_RUNTIMEPM
6944 	bus->idlecount = 0;
6945 	bus->idletime = (int32)MAX_IDLE_COUNT;
6946 	init_waitqueue_head(&bus->rpm_queue);
6947 	mutex_init(&bus->pm_lock);
6948 #else
6949 	bus->idletime = 0;
6950 #endif /* DHD_PCIE_RUNTIMEPM */
6951 
6952 #ifdef PCIE_INB_DW
6953 	/* Initialize the lock to serialize Device Wake Inband activities */
6954 	if (!bus->inb_lock) {
6955 		bus->inb_lock = dhd_os_spin_lock_init(bus->dhd->osh);
6956 	}
6957 #endif
6958 
6959 
6960 	/* Make use_d0_inform TRUE for Rev 5 for backward compatibility */
6961 	if (bus->api.fw_rev < PCIE_SHARED_VERSION_6) {
6962 		bus->use_d0_inform = TRUE;
6963 	} else {
6964 		bus->use_d0_inform = FALSE;
6965 	}
6966 
6967 	return ret;
6968 }
6969 
6970 static void
dhdpcie_init_shared_addr(dhd_bus_t * bus)6971 dhdpcie_init_shared_addr(dhd_bus_t *bus)
6972 {
6973 	uint32 addr = 0;
6974 	uint32 val = 0;
6975 	addr = bus->dongle_ram_base + bus->ramsize - 4;
6976 #ifdef DHD_PCIE_RUNTIMEPM
6977 	dhdpcie_runtime_bus_wake(bus->dhd, TRUE, __builtin_return_address(0));
6978 #endif /* DHD_PCIE_RUNTIMEPM */
6979 	dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
6980 }
6981 
6982 
6983 bool
dhdpcie_chipmatch(uint16 vendor,uint16 device)6984 dhdpcie_chipmatch(uint16 vendor, uint16 device)
6985 {
6986 	if (vendor != PCI_VENDOR_ID_BROADCOM) {
6987 #ifndef DHD_EFI
6988 		DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
6989 			vendor, device));
6990 #endif /* DHD_EFI */
6991 		return (-ENODEV);
6992 	}
6993 
6994 	if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) ||
6995 		(device == BCM4350_D11AC5G_ID) || (device == BCM4350_CHIP_ID) ||
6996 		(device == BCM43569_CHIP_ID))
6997 		return 0;
6998 
6999 	if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) ||
7000 		(device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID))
7001 		return 0;
7002 
7003 	if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) ||
7004 		(device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID))
7005 		return 0;
7006 
7007 	if ((device == BCM4371_D11AC_ID) || (device == BCM4371_D11AC2G_ID) ||
7008 		(device == BCM4371_D11AC5G_ID) || (device == BCM4371_CHIP_ID))
7009 		return 0;
7010 
7011 	if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) ||
7012 		(device == BCM4345_D11AC5G_ID) || BCM4345_CHIP(device))
7013 		return 0;
7014 
7015 	if ((device == BCM43452_D11AC_ID) || (device == BCM43452_D11AC2G_ID) ||
7016 		(device == BCM43452_D11AC5G_ID))
7017 		return 0;
7018 
7019 	if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) ||
7020 		(device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID))
7021 		return 0;
7022 
7023 	if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) ||
7024 		(device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID))
7025 		return 0;
7026 
7027 	if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) ||
7028 		(device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID))
7029 		return 0;
7030 
7031 	if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) ||
7032 		(device == BCM4358_D11AC5G_ID))
7033 		return 0;
7034 
7035 	if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) ||
7036 		(device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID))
7037 		return 0;
7038 
7039 	if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) ||
7040 		(device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID))
7041 		return 0;
7042 
7043 	if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) ||
7044 		(device == BCM4359_D11AC5G_ID))
7045 		return 0;
7046 
7047 	if ((device == BCM43596_D11AC_ID) || (device == BCM43596_D11AC2G_ID) ||
7048 		(device == BCM43596_D11AC5G_ID))
7049 		return 0;
7050 
7051 	if ((device == BCM43597_D11AC_ID) || (device == BCM43597_D11AC2G_ID) ||
7052 		(device == BCM43597_D11AC5G_ID))
7053 		return 0;
7054 
7055 	if ((device == BCM4364_D11AC_ID) || (device == BCM4364_D11AC2G_ID) ||
7056 		(device == BCM4364_D11AC5G_ID) || (device == BCM4364_CHIP_ID))
7057 		return 0;
7058 
7059 	if ((device == BCM4347_D11AC_ID) || (device == BCM4347_D11AC2G_ID) ||
7060 		(device == BCM4347_D11AC5G_ID) || (device == BCM4347_CHIP_ID))
7061 		return 0;
7062 
7063 	if ((device == BCM4361_D11AC_ID) || (device == BCM4361_D11AC2G_ID) ||
7064 		(device == BCM4361_D11AC5G_ID) || (device == BCM4361_CHIP_ID))
7065 		return 0;
7066 
7067 	if ((device == BCM4362_D11AX_ID) || (device == BCM4362_D11AX2G_ID) ||
7068 		(device == BCM4362_D11AX5G_ID) || (device == BCM4362_CHIP_ID)) {
7069 		return 0;
7070 	}
7071 
7072 	if ((device == BCM4365_D11AC_ID) || (device == BCM4365_D11AC2G_ID) ||
7073 		(device == BCM4365_D11AC5G_ID) || (device == BCM4365_CHIP_ID))
7074 		return 0;
7075 
7076 	if ((device == BCM4366_D11AC_ID) || (device == BCM4366_D11AC2G_ID) ||
7077 		(device == BCM4366_D11AC5G_ID) || (device == BCM4366_CHIP_ID))
7078 		return 0;
7079 #ifndef DHD_EFI
7080 	DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device));
7081 #endif
7082 	return (-ENODEV);
7083 } /* dhdpcie_chipmatch */
7084 
7085 /**
7086  * Name:  dhdpcie_cc_nvmshadow
7087  *
7088  * Description:
7089  * A shadow of OTP/SPROM exists in ChipCommon Region
7090  * betw. 0x800 and 0xBFF (Backplane Addr. 0x1800_0800 and 0x1800_0BFF).
7091  * Strapping option (SPROM vs. OTP), presence of OTP/SPROM and its size
7092  * can also be read from ChipCommon Registers.
7093  */
7094 static int
dhdpcie_cc_nvmshadow(dhd_bus_t * bus,struct bcmstrbuf * b)7095 dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
7096 {
7097 	uint16 dump_offset = 0;
7098 	uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
7099 
7100 	/* Table for 65nm OTP Size (in bits) */
7101 	int  otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
7102 
7103 	volatile uint16 *nvm_shadow;
7104 
7105 	uint cur_coreid;
7106 	uint chipc_corerev;
7107 	chipcregs_t *chipcregs;
7108 
7109 	/* Save the current core */
7110 	cur_coreid = si_coreid(bus->sih);
7111 	/* Switch to ChipC */
7112 	chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
7113 	ASSERT(chipcregs != NULL);
7114 
7115 	chipc_corerev = si_corerev(bus->sih);
7116 
7117 	/* Check ChipcommonCore Rev */
7118 	if (chipc_corerev < 44) {
7119 		DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
7120 		return BCME_UNSUPPORTED;
7121 	}
7122 
7123 	/* Check ChipID */
7124 	if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) && !BCM4345_CHIP((uint16)bus->sih->chip) &&
7125 	        ((uint16)bus->sih->chip != BCM4355_CHIP_ID) &&
7126 	        ((uint16)bus->sih->chip != BCM4364_CHIP_ID)) {
7127 		DHD_ERROR(("%s: cc_nvmdump cmd. supported for Olympic chips"
7128 					"4350/4345/4355/4364 only\n", __FUNCTION__));
7129 		return BCME_UNSUPPORTED;
7130 	}
7131 
7132 	/* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
7133 	if (chipcregs->sromcontrol & SRC_PRESENT) {
7134 		/* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
7135 		sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
7136 					>> SRC_SIZE_SHIFT))) * 1024;
7137 		bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
7138 	}
7139 
7140 	if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
7141 		bcm_bprintf(b, "\nOTP Present");
7142 
7143 		if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
7144 			== OTPL_WRAP_TYPE_40NM) {
7145 			/* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
7146 			/* Chipcommon rev51 is a variation on rev45 and does not support
7147 			 * the latest OTP configuration.
7148 			 */
7149 			if (chipc_corerev != 51 && chipc_corerev >= 49) {
7150 				otp_size = (((chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
7151 					>> OTPL_ROW_SIZE_SHIFT) + 1) * 1024;
7152 				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
7153 			} else {
7154 				otp_size =  (((chipcregs->capabilities & CC_CAP_OTPSIZE)
7155 				        >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
7156 				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
7157 			}
7158 		} else {
7159 			/* This part is untested since newer chips have 40nm OTP */
7160 			/* Chipcommon rev51 is a variation on rev45 and does not support
7161 			 * the latest OTP configuration.
7162 			 */
7163 			if (chipc_corerev != 51 && chipc_corerev >= 49) {
7164 				otp_size = otp_size_65nm[(chipcregs->otplayout & OTPL_ROW_SIZE_MASK)
7165 						>> OTPL_ROW_SIZE_SHIFT];
7166 				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
7167 			} else {
7168 				otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
7169 					        >> CC_CAP_OTPSIZE_SHIFT];
7170 				bcm_bprintf(b, "(Size %d bits)\n", otp_size);
7171 				DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
7172 					__FUNCTION__));
7173 			}
7174 		}
7175 	}
7176 
7177 	/* Chipcommon rev51 is a variation on rev45 and does not support
7178 	 * the latest OTP configuration.
7179 	 */
7180 	if (chipc_corerev != 51 && chipc_corerev >= 49) {
7181 		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
7182 			((chipcregs->otplayout & OTPL_ROW_SIZE_MASK) == 0)) {
7183 			DHD_ERROR(("%s: SPROM and OTP could not be found "
7184 				"sromcontrol = %x, otplayout = %x \n",
7185 				__FUNCTION__, chipcregs->sromcontrol, chipcregs->otplayout));
7186 			return BCME_NOTFOUND;
7187 		}
7188 	} else {
7189 		if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
7190 			((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
7191 			DHD_ERROR(("%s: SPROM and OTP could not be found "
7192 				"sromcontrol = %x, capablities = %x \n",
7193 				__FUNCTION__, chipcregs->sromcontrol, chipcregs->capabilities));
7194 			return BCME_NOTFOUND;
7195 		}
7196 	}
7197 
7198 	/* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
7199 	if ((!(chipcregs->sromcontrol & SRC_PRESENT) || (chipcregs->sromcontrol & SRC_OTPSEL)) &&
7200 		(chipcregs->sromcontrol & SRC_OTPPRESENT)) {
7201 
7202 		bcm_bprintf(b, "OTP Strap selected.\n"
7203 		               "\nOTP Shadow in ChipCommon:\n");
7204 
7205 		dump_size = otp_size / 16 ; /* 16bit words */
7206 
7207 	} else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
7208 		(chipcregs->sromcontrol & SRC_PRESENT)) {
7209 
7210 		bcm_bprintf(b, "SPROM Strap selected\n"
7211 				"\nSPROM Shadow in ChipCommon:\n");
7212 
7213 		/* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
7214 		/* dump_size in 16bit words */
7215 		dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
7216 	} else {
7217 		DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
7218 			__FUNCTION__));
7219 		return BCME_NOTFOUND;
7220 	}
7221 
7222 	if (bus->regs == NULL) {
7223 		DHD_ERROR(("ChipCommon Regs. not initialized\n"));
7224 		return BCME_NOTREADY;
7225 	} else {
7226 		bcm_bprintf(b, "\n OffSet:");
7227 
7228 		/* Chipcommon rev51 is a variation on rev45 and does not support
7229 		 * the latest OTP configuration.
7230 		 */
7231 		if (chipc_corerev != 51 && chipc_corerev >= 49) {
7232 			/* Chip common can read only 8kbits,
7233 			* for ccrev >= 49 otp size is around 12 kbits so use GCI core
7234 			*/
7235 			nvm_shadow = (volatile uint16 *)si_setcore(bus->sih, GCI_CORE_ID, 0);
7236 		} else {
7237 			/* Point to the SPROM/OTP shadow in ChipCommon */
7238 			nvm_shadow = chipcregs->sromotp;
7239 		}
7240 
7241 		if (nvm_shadow == NULL) {
7242 			DHD_ERROR(("%s: NVM Shadow is not intialized\n", __FUNCTION__));
7243 			return BCME_NOTFOUND;
7244 		}
7245 
7246 		/*
7247 		* Read 16 bits / iteration.
7248 		* dump_size & dump_offset in 16-bit words
7249 		*/
7250 		while (dump_offset < dump_size) {
7251 			if (dump_offset % 2 == 0)
7252 				/* Print the offset in the shadow space in Bytes */
7253 				bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);
7254 
7255 			bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
7256 			dump_offset += 0x1;
7257 		}
7258 	}
7259 
7260 	/* Switch back to the original core */
7261 	si_setcore(bus->sih, cur_coreid, 0);
7262 
7263 	return BCME_OK;
7264 } /* dhdpcie_cc_nvmshadow */
7265 
7266 /** Flow rings are dynamically created and destroyed */
dhd_bus_clean_flow_ring(dhd_bus_t * bus,void * node)7267 void dhd_bus_clean_flow_ring(dhd_bus_t *bus, void *node)
7268 {
7269 	void *pkt;
7270 	flow_queue_t *queue;
7271 	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)node;
7272 	unsigned long flags;
7273 
7274 	queue = &flow_ring_node->queue;
7275 
7276 #ifdef DHDTCPACK_SUPPRESS
7277 	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
7278 	 * when there is a newly coming packet from network stack.
7279 	 */
7280 	dhd_tcpack_info_tbl_clean(bus->dhd);
7281 #endif /* DHDTCPACK_SUPPRESS */
7282 
7283 	/* clean up BUS level info */
7284 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
7285 
7286 	/* Flush all pending packets in the queue, if any */
7287 	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
7288 		PKTFREE(bus->dhd->osh, pkt, TRUE);
7289 	}
7290 	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
7291 
7292 	/* Reinitialise flowring's queue */
7293 	dhd_flow_queue_reinit(bus->dhd, queue, FLOW_RING_QUEUE_THRESHOLD);
7294 	flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
7295 	flow_ring_node->active = FALSE;
7296 
7297 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
7298 
7299 	/* Hold flowring_list_lock to ensure no race condition while accessing the List */
7300 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
7301 	dll_delete(&flow_ring_node->list);
7302 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
7303 
7304 	/* Release the flowring object back into the pool */
7305 	dhd_prot_flowrings_pool_release(bus->dhd,
7306 		flow_ring_node->flowid, flow_ring_node->prot_info);
7307 
7308 	/* Free the flowid back to the flowid allocator */
7309 	dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
7310 	                flow_ring_node->flowid);
7311 }
7312 
7313 /**
7314  * Allocate a Flow ring buffer,
7315  * Init Ring buffer, send Msg to device about flow ring creation
7316 */
7317 int
dhd_bus_flow_ring_create_request(dhd_bus_t * bus,void * arg)7318 dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg)
7319 {
7320 	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
7321 
7322 	DHD_INFO(("%s :Flow create\n", __FUNCTION__));
7323 
7324 	/* Send Msg to device about flow ring creation */
7325 	if (dhd_prot_flow_ring_create(bus->dhd, flow_ring_node) != BCME_OK)
7326 		return BCME_NOMEM;
7327 
7328 	return BCME_OK;
7329 }
7330 
7331 /** Handle response from dongle on a 'flow ring create' request */
7332 void
dhd_bus_flow_ring_create_response(dhd_bus_t * bus,uint16 flowid,int32 status)7333 dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
7334 {
7335 	flow_ring_node_t *flow_ring_node;
7336 	unsigned long flags;
7337 
7338 	DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
7339 
7340 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
7341 	ASSERT(flow_ring_node->flowid == flowid);
7342 
7343 	if (status != BCME_OK) {
7344 		DHD_ERROR(("%s Flow create Response failure error status = %d \n",
7345 		     __FUNCTION__, status));
7346 		/* Call Flow clean up */
7347 		dhd_bus_clean_flow_ring(bus, flow_ring_node);
7348 		return;
7349 	}
7350 
7351 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
7352 	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
7353 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
7354 
7355 	/* Now add the Flow ring node into the active list
7356 	 * Note that this code to add the newly created node to the active
7357 	 * list was living in dhd_flowid_lookup. But note that after
7358 	 * adding the node to the active list the contents of node is being
7359 	 * filled in dhd_prot_flow_ring_create.
7360 	 * If there is a D2H interrupt after the node gets added to the
7361 	 * active list and before the node gets populated with values
7362 	 * from the Bottom half dhd_update_txflowrings would be called.
7363 	 * which will then try to walk through the active flow ring list,
7364 	 * pickup the nodes and operate on them. Now note that since
7365 	 * the function dhd_prot_flow_ring_create is not finished yet
7366 	 * the contents of flow_ring_node can still be NULL leading to
7367 	 * crashes. Hence the flow_ring_node should be added to the
7368 	 * active list only after its truely created, which is after
7369 	 * receiving the create response message from the Host.
7370 	 */
7371 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
7372 	dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
7373 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
7374 
7375 	dhd_bus_schedule_queue(bus, flowid, FALSE); /* from queue to flowring */
7376 
7377 	return;
7378 }
7379 
7380 int
dhd_bus_flow_ring_delete_request(dhd_bus_t * bus,void * arg)7381 dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
7382 {
7383 	void * pkt;
7384 	flow_queue_t *queue;
7385 	flow_ring_node_t *flow_ring_node;
7386 	unsigned long flags;
7387 
7388 	DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
7389 
7390 	flow_ring_node = (flow_ring_node_t *)arg;
7391 
7392 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
7393 	if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING) {
7394 		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
7395 		DHD_ERROR(("%s :Delete Pending flowid %u\n", __FUNCTION__, flow_ring_node->flowid));
7396 		return BCME_ERROR;
7397 	}
7398 	flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
7399 
7400 	queue = &flow_ring_node->queue; /* queue associated with flow ring */
7401 
7402 #ifdef DHDTCPACK_SUPPRESS
7403 	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
7404 	 * when there is a newly coming packet from network stack.
7405 	 */
7406 	dhd_tcpack_info_tbl_clean(bus->dhd);
7407 #endif /* DHDTCPACK_SUPPRESS */
7408 	/* Flush all pending packets in the queue, if any */
7409 	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
7410 		PKTFREE(bus->dhd->osh, pkt, TRUE);
7411 	}
7412 	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
7413 
7414 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
7415 
7416 	/* Send Msg to device about flow ring deletion */
7417 	dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);
7418 
7419 	return BCME_OK;
7420 }
7421 
7422 void
dhd_bus_flow_ring_delete_response(dhd_bus_t * bus,uint16 flowid,uint32 status)7423 dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
7424 {
7425 	flow_ring_node_t *flow_ring_node;
7426 
7427 	DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
7428 
7429 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
7430 	ASSERT(flow_ring_node->flowid == flowid);
7431 
7432 	if (status != BCME_OK) {
7433 		DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
7434 		    __FUNCTION__, status));
7435 		return;
7436 	}
7437 	/* Call Flow clean up */
7438 	dhd_bus_clean_flow_ring(bus, flow_ring_node);
7439 
7440 	return;
7441 
7442 }
7443 
dhd_bus_flow_ring_flush_request(dhd_bus_t * bus,void * arg)7444 int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
7445 {
7446 	void *pkt;
7447 	flow_queue_t *queue;
7448 	flow_ring_node_t *flow_ring_node;
7449 	unsigned long flags;
7450 
7451 	DHD_INFO(("%s :Flow Flush\n", __FUNCTION__));
7452 
7453 	flow_ring_node = (flow_ring_node_t *)arg;
7454 
7455 	DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
7456 	queue = &flow_ring_node->queue; /* queue associated with flow ring */
7457 	/* Flow ring status will be set back to FLOW_RING_STATUS_OPEN
7458 	 * once flow ring flush response is received for this flowring node.
7459 	 */
7460 	flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;
7461 
7462 #ifdef DHDTCPACK_SUPPRESS
7463 	/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
7464 	 * when there is a newly coming packet from network stack.
7465 	 */
7466 	dhd_tcpack_info_tbl_clean(bus->dhd);
7467 #endif /* DHDTCPACK_SUPPRESS */
7468 
7469 	/* Flush all pending packets in the queue, if any */
7470 	while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
7471 		PKTFREE(bus->dhd->osh, pkt, TRUE);
7472 	}
7473 	ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
7474 
7475 	DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
7476 
7477 	/* Send Msg to device about flow ring flush */
7478 	dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);
7479 
7480 	return BCME_OK;
7481 }
7482 
7483 void
dhd_bus_flow_ring_flush_response(dhd_bus_t * bus,uint16 flowid,uint32 status)7484 dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
7485 {
7486 	flow_ring_node_t *flow_ring_node;
7487 
7488 	if (status != BCME_OK) {
7489 		DHD_ERROR(("%s Flow flush Response failure error status = %d \n",
7490 		    __FUNCTION__, status));
7491 		return;
7492 	}
7493 
7494 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
7495 	ASSERT(flow_ring_node->flowid == flowid);
7496 
7497 	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
7498 	return;
7499 }
7500 
7501 uint32
dhd_bus_max_h2d_queues(struct dhd_bus * bus)7502 dhd_bus_max_h2d_queues(struct dhd_bus *bus)
7503 {
7504 	return bus->max_submission_rings;
7505 }
7506 
7507 /* To be symmetric with SDIO */
7508 void
dhd_bus_pktq_flush(dhd_pub_t * dhdp)7509 dhd_bus_pktq_flush(dhd_pub_t *dhdp)
7510 {
7511 	return;
7512 }
7513 
7514 void
dhd_bus_set_linkdown(dhd_pub_t * dhdp,bool val)7515 dhd_bus_set_linkdown(dhd_pub_t *dhdp, bool val)
7516 {
7517 	dhdp->bus->is_linkdown = val;
7518 }
7519 
7520 #ifdef IDLE_TX_FLOW_MGMT
7521 /* resume request */
7522 int
dhd_bus_flow_ring_resume_request(dhd_bus_t * bus,void * arg)7523 dhd_bus_flow_ring_resume_request(dhd_bus_t *bus, void *arg)
7524 {
7525 	flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
7526 
7527 	DHD_ERROR(("%s :Flow Resume Request flow id %u\n", __FUNCTION__, flow_ring_node->flowid));
7528 
7529 	flow_ring_node->status = FLOW_RING_STATUS_RESUME_PENDING;
7530 
7531 	/* Send Msg to device about flow ring resume */
7532 	dhd_prot_flow_ring_resume(bus->dhd, flow_ring_node);
7533 
7534 	return BCME_OK;
7535 }
7536 
7537 /* add the node back to active flowring */
7538 void
dhd_bus_flow_ring_resume_response(dhd_bus_t * bus,uint16 flowid,int32 status)7539 dhd_bus_flow_ring_resume_response(dhd_bus_t *bus, uint16 flowid, int32 status)
7540 {
7541 
7542 	flow_ring_node_t *flow_ring_node;
7543 
7544 	DHD_TRACE(("%s :flowid %d \n", __FUNCTION__, flowid));
7545 
7546 	flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
7547 	ASSERT(flow_ring_node->flowid == flowid);
7548 
7549 	if (status != BCME_OK) {
7550 		DHD_ERROR(("%s Error Status = %d \n",
7551 			__FUNCTION__, status));
7552 		return;
7553 	}
7554 
7555 	DHD_TRACE(("%s :Number of pkts queued in FlowId:%d is -> %u!!\n",
7556 		__FUNCTION__, flow_ring_node->flowid,  flow_ring_node->queue.len));
7557 
7558 	flow_ring_node->status = FLOW_RING_STATUS_OPEN;
7559 
7560 	dhd_bus_schedule_queue(bus, flowid, FALSE);
7561 	return;
7562 }
7563 
7564 /* scan the flow rings in active list for idle time out */
7565 void
dhd_bus_check_idle_scan(dhd_bus_t * bus)7566 dhd_bus_check_idle_scan(dhd_bus_t *bus)
7567 {
7568 	uint64 time_stamp; /* in millisec */
7569 	uint64 diff;
7570 
7571 	time_stamp = OSL_SYSUPTIME();
7572 	diff = time_stamp - bus->active_list_last_process_ts;
7573 
7574 	if (diff > IDLE_FLOW_LIST_TIMEOUT) {
7575 		dhd_bus_idle_scan(bus);
7576 		bus->active_list_last_process_ts = OSL_SYSUPTIME();
7577 	}
7578 
7579 	return;
7580 }
7581 
7582 
7583 /* scan the nodes in active list till it finds a non idle node */
7584 void
dhd_bus_idle_scan(dhd_bus_t * bus)7585 dhd_bus_idle_scan(dhd_bus_t *bus)
7586 {
7587 	dll_t *item, *prev;
7588 	flow_ring_node_t *flow_ring_node;
7589 	uint64 time_stamp, diff;
7590 	unsigned long flags;
7591 	uint16 ringid[MAX_SUSPEND_REQ];
7592 	uint16 count = 0;
7593 
7594 	time_stamp = OSL_SYSUPTIME();
7595 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
7596 
7597 	for (item = dll_tail_p(&bus->flowring_active_list);
7598 	         !dll_end(&bus->flowring_active_list, item); item = prev) {
7599 		prev = dll_prev_p(item);
7600 
7601 		flow_ring_node = dhd_constlist_to_flowring(item);
7602 
7603 		if (flow_ring_node->flowid == (bus->max_submission_rings - 1))
7604 			continue;
7605 
7606 		if (flow_ring_node->status != FLOW_RING_STATUS_OPEN) {
7607 			/* Takes care of deleting zombie rings */
7608 			/* delete from the active list */
7609 			DHD_INFO(("deleting flow id %u from active list\n",
7610 				flow_ring_node->flowid));
7611 			__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
7612 			continue;
7613 		}
7614 
7615 		diff = time_stamp - flow_ring_node->last_active_ts;
7616 
7617 		if ((diff > IDLE_FLOW_RING_TIMEOUT) && !(flow_ring_node->queue.len))  {
7618 			DHD_ERROR(("\nSuspending flowid %d\n", flow_ring_node->flowid));
7619 			/* delete from the active list */
7620 			__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
7621 			flow_ring_node->status = FLOW_RING_STATUS_SUSPENDED;
7622 			ringid[count] = flow_ring_node->flowid;
7623 			count++;
7624 			if (count == MAX_SUSPEND_REQ) {
7625 				/* create a batch message now!! */
7626 				dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
7627 				count = 0;
7628 			}
7629 
7630 		} else {
7631 
7632 			/* No more scanning, break from here! */
7633 			break;
7634 		}
7635 	}
7636 
7637 	if (count) {
7638 		dhd_prot_flow_ring_batch_suspend_request(bus->dhd, ringid, count);
7639 	}
7640 
7641 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
7642 
7643 	return;
7644 }
7645 
dhd_flow_ring_move_to_active_list_head(struct dhd_bus * bus,flow_ring_node_t * flow_ring_node)7646 void dhd_flow_ring_move_to_active_list_head(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
7647 {
7648 	unsigned long flags;
7649 	dll_t* list;
7650 
7651 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
7652 	/* check if the node is already at head, otherwise delete it and prepend */
7653 	list = dll_head_p(&bus->flowring_active_list);
7654 	if (&flow_ring_node->list != list) {
7655 		dll_delete(&flow_ring_node->list);
7656 		dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
7657 	}
7658 
7659 	/* update flow ring timestamp */
7660 	flow_ring_node->last_active_ts = OSL_SYSUPTIME();
7661 
7662 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
7663 
7664 	return;
7665 }
7666 
dhd_flow_ring_add_to_active_list(struct dhd_bus * bus,flow_ring_node_t * flow_ring_node)7667 void dhd_flow_ring_add_to_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
7668 {
7669 	unsigned long flags;
7670 
7671 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
7672 
7673 	dll_prepend(&bus->flowring_active_list, &flow_ring_node->list);
7674 	/* update flow ring timestamp */
7675 	flow_ring_node->last_active_ts = OSL_SYSUPTIME();
7676 
7677 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
7678 
7679 	return;
7680 }
__dhd_flow_ring_delete_from_active_list(struct dhd_bus * bus,flow_ring_node_t * flow_ring_node)7681 void __dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
7682 {
7683 	dll_delete(&flow_ring_node->list);
7684 }
7685 
dhd_flow_ring_delete_from_active_list(struct dhd_bus * bus,flow_ring_node_t * flow_ring_node)7686 void dhd_flow_ring_delete_from_active_list(struct dhd_bus *bus, flow_ring_node_t *flow_ring_node)
7687 {
7688 	unsigned long flags;
7689 
7690 	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, flags);
7691 
7692 	__dhd_flow_ring_delete_from_active_list(bus, flow_ring_node);
7693 
7694 	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, flags);
7695 
7696 	return;
7697 }
7698 #endif /* IDLE_TX_FLOW_MGMT */
7699 
7700 int
dhdpcie_bus_clock_start(struct dhd_bus * bus)7701 dhdpcie_bus_clock_start(struct dhd_bus *bus)
7702 {
7703 	return dhdpcie_start_host_pcieclock(bus);
7704 }
7705 
7706 int
dhdpcie_bus_clock_stop(struct dhd_bus * bus)7707 dhdpcie_bus_clock_stop(struct dhd_bus *bus)
7708 {
7709 	return dhdpcie_stop_host_pcieclock(bus);
7710 }
7711 
7712 int
dhdpcie_bus_disable_device(struct dhd_bus * bus)7713 dhdpcie_bus_disable_device(struct dhd_bus *bus)
7714 {
7715 	return dhdpcie_disable_device(bus);
7716 }
7717 
7718 int
dhdpcie_bus_enable_device(struct dhd_bus * bus)7719 dhdpcie_bus_enable_device(struct dhd_bus *bus)
7720 {
7721 	return dhdpcie_enable_device(bus);
7722 }
7723 
7724 int
dhdpcie_bus_alloc_resource(struct dhd_bus * bus)7725 dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
7726 {
7727 	return dhdpcie_alloc_resource(bus);
7728 }
7729 
7730 void
dhdpcie_bus_free_resource(struct dhd_bus * bus)7731 dhdpcie_bus_free_resource(struct dhd_bus *bus)
7732 {
7733 	dhdpcie_free_resource(bus);
7734 }
7735 
7736 int
dhd_bus_request_irq(struct dhd_bus * bus)7737 dhd_bus_request_irq(struct dhd_bus *bus)
7738 {
7739 	return dhdpcie_bus_request_irq(bus);
7740 }
7741 
7742 bool
dhdpcie_bus_dongle_attach(struct dhd_bus * bus)7743 dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
7744 {
7745 	return dhdpcie_dongle_attach(bus);
7746 }
7747 
7748 int
dhd_bus_release_dongle(struct dhd_bus * bus)7749 dhd_bus_release_dongle(struct dhd_bus *bus)
7750 {
7751 	bool dongle_isolation;
7752 	osl_t *osh;
7753 
7754 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
7755 
7756 	if (bus) {
7757 		osh = bus->osh;
7758 		ASSERT(osh);
7759 
7760 		if (bus->dhd) {
7761 			dongle_isolation = bus->dhd->dongle_isolation;
7762 			dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
7763 		}
7764 	}
7765 
7766 	return 0;
7767 }
7768 
7769 void
dhdpcie_cto_init(struct dhd_bus * bus,bool enable)7770 dhdpcie_cto_init(struct dhd_bus *bus, bool enable)
7771 {
7772 	if (enable) {
7773 		dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4,
7774 			PCI_CTO_INT_MASK | PCI_SBIM_MASK_SERR);
7775 		dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, SPROM_BACKPLANE_EN);
7776 
7777 		if (bus->dhd->cto_threshold == 0) {
7778 			bus->dhd->cto_threshold = PCIE_CTO_TO_THRESH_DEFAULT;
7779 		}
7780 
7781 		si_corereg(bus->sih, bus->sih->buscoreidx,
7782 				OFFSETOF(sbpcieregs_t, ctoctrl), ~0,
7783 				((bus->dhd->cto_threshold << PCIE_CTO_TO_THRESHOLD_SHIFT) &
7784 				PCIE_CTO_TO_THRESHHOLD_MASK) |
7785 				((PCIE_CTO_CLKCHKCNT_VAL << PCIE_CTO_CLKCHKCNT_SHIFT) &
7786 				PCIE_CTO_CLKCHKCNT_MASK) |
7787 				PCIE_CTO_ENAB_MASK);
7788 	} else {
7789 		dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, 0);
7790 		dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, 0);
7791 
7792 		si_corereg(bus->sih, bus->sih->buscoreidx,
7793 				OFFSETOF(sbpcieregs_t, ctoctrl), ~0, 0);
7794 	}
7795 }
7796 
7797 static void
dhdpcie_cto_error_recovery(struct dhd_bus * bus)7798 dhdpcie_cto_error_recovery(struct dhd_bus *bus)
7799 {
7800 	uint32 pci_intmask, err_status;
7801 	uint8 i = 0;
7802 
7803 	pci_intmask = dhdpcie_bus_cfg_read_dword(bus, PCI_INT_MASK, 4);
7804 	dhdpcie_bus_cfg_write_dword(bus, PCI_INT_MASK, 4, pci_intmask & ~PCI_CTO_INT_MASK);
7805 
7806 	DHD_OS_WAKE_LOCK(bus->dhd);
7807 
7808 	/* reset backplane */
7809 	dhdpcie_bus_cfg_write_dword(bus, PCI_SPROM_CONTROL, 4, SPROM_CFG_TO_SB_RST);
7810 
7811 	/* clear timeout error */
7812 	while (1) {
7813 		err_status =  si_corereg(bus->sih, bus->sih->buscoreidx,
7814 			OFFSETOF(sbpcieregs_t, dm_errlog),
7815 			0, 0);
7816 		if (err_status & PCIE_CTO_ERR_MASK) {
7817 			si_corereg(bus->sih, bus->sih->buscoreidx,
7818 					OFFSETOF(sbpcieregs_t, dm_errlog),
7819 					~0, PCIE_CTO_ERR_MASK);
7820 		} else {
7821 			break;
7822 		}
7823 		OSL_DELAY(CTO_TO_CLEAR_WAIT_MS * 1000);
7824 		i++;
7825 		if (i > CTO_TO_CLEAR_WAIT_MAX_CNT) {
7826 			DHD_ERROR(("cto recovery fail\n"));
7827 
7828 			DHD_OS_WAKE_UNLOCK(bus->dhd);
7829 			return;
7830 		}
7831 	}
7832 
7833 	/* clear interrupt status */
7834 	dhdpcie_bus_cfg_write_dword(bus, PCI_INT_STATUS, 4, PCI_CTO_INT_MASK);
7835 
7836 	/* Halt ARM & remove reset */
7837 	/* TBD : we can add ARM Halt here in case */
7838 
7839 	DHD_ERROR(("cto recovery success\n"));
7840 
7841 	DHD_OS_WAKE_UNLOCK(bus->dhd);
7842 }
7843 
7844 #ifdef BCMPCIE_OOB_HOST_WAKE
7845 int
dhd_bus_oob_intr_register(dhd_pub_t * dhdp)7846 dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
7847 {
7848 	return dhdpcie_oob_intr_register(dhdp->bus);
7849 }
7850 
7851 void
dhd_bus_oob_intr_unregister(dhd_pub_t * dhdp)7852 dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
7853 {
7854 	dhdpcie_oob_intr_unregister(dhdp->bus);
7855 }
7856 
7857 void
dhd_bus_oob_intr_set(dhd_pub_t * dhdp,bool enable)7858 dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
7859 {
7860 	dhdpcie_oob_intr_set(dhdp->bus, enable);
7861 }
7862 #endif /* BCMPCIE_OOB_HOST_WAKE */
7863 
7864 
7865 
7866 bool
dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t * bus)7867 dhdpcie_bus_get_pcie_hostready_supported(dhd_bus_t *bus)
7868 {
7869 	return bus->dhd->d2h_hostrdy_supported;
7870 }
7871 
7872 void
dhd_pcie_dump_core_regs(dhd_pub_t * pub,uint32 index,uint32 first_addr,uint32 last_addr)7873 dhd_pcie_dump_core_regs(dhd_pub_t * pub, uint32 index, uint32 first_addr, uint32 last_addr)
7874 {
7875 	dhd_bus_t *bus = pub->bus;
7876 	uint32	coreoffset = index << 12;
7877 	uint32	core_addr = SI_ENUM_BASE + coreoffset;
7878 	uint32 value;
7879 
7880 
7881 	while (first_addr <= last_addr) {
7882 		core_addr = SI_ENUM_BASE + coreoffset + first_addr;
7883 		if (si_backplane_access(bus->sih, core_addr, 4, &value, TRUE) != BCME_OK) {
7884 			DHD_ERROR(("Invalid size/addr combination \n"));
7885 		}
7886 		DHD_ERROR(("[0x%08x]: 0x%08x\n", core_addr, value));
7887 		first_addr = first_addr + 4;
7888 	}
7889 }
7890 
7891 #ifdef PCIE_OOB
7892 bool
dhdpcie_bus_get_pcie_oob_dw_supported(dhd_bus_t * bus)7893 dhdpcie_bus_get_pcie_oob_dw_supported(dhd_bus_t *bus)
7894 {
7895 	if (!bus->dhd)
7896 		return FALSE;
7897 	if (bus->oob_enabled) {
7898 		return !bus->dhd->d2h_no_oob_dw;
7899 	} else {
7900 		return FALSE;
7901 	}
7902 }
7903 #endif /* PCIE_OOB */
7904 
7905 void
dhdpcie_bus_enab_pcie_dw(dhd_bus_t * bus,uint8 dw_option)7906 dhdpcie_bus_enab_pcie_dw(dhd_bus_t *bus, uint8 dw_option)
7907 {
7908 	DHD_ERROR(("ENABLING DW:%d\n", dw_option));
7909 	bus->dw_option = dw_option;
7910 }
7911 
7912 #ifdef PCIE_INB_DW
7913 bool
dhdpcie_bus_get_pcie_inband_dw_supported(dhd_bus_t * bus)7914 dhdpcie_bus_get_pcie_inband_dw_supported(dhd_bus_t *bus)
7915 {
7916 	if (!bus->dhd)
7917 		return FALSE;
7918 	if (bus->inb_enabled) {
7919 		return bus->dhd->d2h_inband_dw;
7920 	} else {
7921 		return FALSE;
7922 	}
7923 }
7924 
7925 void
dhdpcie_bus_set_pcie_inband_dw_state(dhd_bus_t * bus,enum dhd_bus_ds_state state)7926 dhdpcie_bus_set_pcie_inband_dw_state(dhd_bus_t *bus, enum dhd_bus_ds_state state)
7927 {
7928 	if (!INBAND_DW_ENAB(bus))
7929 		return;
7930 
7931 	DHD_INFO(("%s:%d\n", __FUNCTION__, state));
7932 	bus->dhd->ds_state = state;
7933 	if (state == DW_DEVICE_DS_DISABLED_WAIT || state == DW_DEVICE_DS_D3_INFORM_WAIT) {
7934 		bus->ds_exit_timeout = 100;
7935 	}
7936 	if (state == DW_DEVICE_HOST_WAKE_WAIT) {
7937 		bus->host_sleep_exit_timeout = 100;
7938 	}
7939 	if (state == DW_DEVICE_DS_DEV_WAKE) {
7940 		bus->ds_exit_timeout = 0;
7941 	}
7942 	if (state == DW_DEVICE_DS_ACTIVE) {
7943 		bus->host_sleep_exit_timeout = 0;
7944 	}
7945 }
7946 
7947 enum dhd_bus_ds_state
dhdpcie_bus_get_pcie_inband_dw_state(dhd_bus_t * bus)7948 dhdpcie_bus_get_pcie_inband_dw_state(dhd_bus_t *bus)
7949 {
7950 	if (!INBAND_DW_ENAB(bus))
7951 		return DW_DEVICE_DS_INVALID;
7952 	return bus->dhd->ds_state;
7953 }
7954 #endif /* PCIE_INB_DW */
7955 
7956 bool
dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t * bus)7957 dhdpcie_bus_get_pcie_idma_supported(dhd_bus_t *bus)
7958 {
7959 	if (!bus->dhd)
7960 		return FALSE;
7961 	else if (bus->idma_enabled) {
7962 		return bus->dhd->idma_enable;
7963 	} else {
7964 		return FALSE;
7965 	}
7966 }
7967 
7968 bool
dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t * bus)7969 dhdpcie_bus_get_pcie_ifrm_supported(dhd_bus_t *bus)
7970 {
7971 	if (!bus->dhd)
7972 		return FALSE;
7973 	else if (bus->ifrm_enabled) {
7974 		return bus->dhd->ifrm_enable;
7975 	} else {
7976 		return FALSE;
7977 	}
7978 }
7979 
7980 
7981 void
dhd_bus_dump_trap_info(dhd_bus_t * bus,struct bcmstrbuf * strbuf)7982 dhd_bus_dump_trap_info(dhd_bus_t *bus, struct bcmstrbuf *strbuf)
7983 {
7984 	trap_t *tr = &bus->dhd->last_trap_info;
7985 	bcm_bprintf(strbuf,
7986 		"\nTRAP type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
7987 		" lp 0x%x, rpc 0x%x"
7988 		"\nTrap offset 0x%x, r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
7989 		"r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
7990 		ltoh32(tr->type), ltoh32(tr->epc), ltoh32(tr->cpsr), ltoh32(tr->spsr),
7991 		ltoh32(tr->r13), ltoh32(tr->r14), ltoh32(tr->pc),
7992 		ltoh32(bus->pcie_sh->trap_addr),
7993 		ltoh32(tr->r0), ltoh32(tr->r1), ltoh32(tr->r2), ltoh32(tr->r3),
7994 		ltoh32(tr->r4), ltoh32(tr->r5), ltoh32(tr->r6), ltoh32(tr->r7));
7995 }
7996 
7997 int
dhd_bus_readwrite_bp_addr(dhd_pub_t * dhdp,uint addr,uint size,uint * data,bool read)7998 dhd_bus_readwrite_bp_addr(dhd_pub_t *dhdp, uint addr, uint size, uint* data, bool read)
7999 {
8000 	int bcmerror = 0;
8001 	struct dhd_bus *bus = dhdp->bus;
8002 
8003 	if (si_backplane_access(bus->sih, addr, size, data, read) != BCME_OK) {
8004 			DHD_ERROR(("Invalid size/addr combination \n"));
8005 			bcmerror = BCME_ERROR;
8006 	}
8007 
8008 	return bcmerror;
8009 }
8010 
8011 int
dhd_get_idletime(dhd_pub_t * dhd)8012 dhd_get_idletime(dhd_pub_t *dhd)
8013 {
8014 	return dhd->bus->idletime;
8015 }
8016 
8017 #ifdef DHD_SSSR_DUMP
8018 
8019 static INLINE void
dhd_sbreg_op(dhd_pub_t * dhd,uint addr,uint * val,bool read)8020 dhd_sbreg_op(dhd_pub_t *dhd, uint addr, uint *val, bool read)
8021 {
8022 	OSL_DELAY(1);
8023 	si_backplane_access(dhd->bus->sih, addr, sizeof(uint), val, read);
8024 	DHD_ERROR(("%s: addr:0x%x val:0x%x read:%d\n", __FUNCTION__, addr, *val, read));
8025 	return;
8026 }
8027 
8028 static int
dhdpcie_get_sssr_fifo_dump(dhd_pub_t * dhd,uint * buf,uint fifo_size,uint addr_reg,uint data_reg)8029 dhdpcie_get_sssr_fifo_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
8030 	uint addr_reg, uint data_reg)
8031 {
8032 	uint addr;
8033 	uint val = 0;
8034 	int i;
8035 
8036 	DHD_ERROR(("%s\n", __FUNCTION__));
8037 
8038 	if (!buf) {
8039 		DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
8040 		return BCME_ERROR;
8041 	}
8042 
8043 	if (!fifo_size) {
8044 		DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
8045 		return BCME_ERROR;
8046 	}
8047 
8048 	/* Set the base address offset to 0 */
8049 	addr = addr_reg;
8050 	val = 0;
8051 	dhd_sbreg_op(dhd, addr, &val, FALSE);
8052 
8053 	addr = data_reg;
8054 	/* Read 4 bytes at once and loop for fifo_size / 4 */
8055 	for (i = 0; i < fifo_size / 4; i++) {
8056 		si_backplane_access(dhd->bus->sih, addr, sizeof(uint), &val, TRUE);
8057 		buf[i] = val;
8058 		OSL_DELAY(1);
8059 	}
8060 	return BCME_OK;
8061 }
8062 
8063 static int
dhdpcie_get_sssr_vasip_dump(dhd_pub_t * dhd,uint * buf,uint fifo_size,uint addr_reg)8064 dhdpcie_get_sssr_vasip_dump(dhd_pub_t *dhd, uint *buf, uint fifo_size,
8065 	uint addr_reg)
8066 {
8067 	uint addr;
8068 	uint val = 0;
8069 	int i;
8070 
8071 	DHD_ERROR(("%s\n", __FUNCTION__));
8072 
8073 	if (!buf) {
8074 		DHD_ERROR(("%s: buf is NULL\n", __FUNCTION__));
8075 		return BCME_ERROR;
8076 	}
8077 
8078 	if (!fifo_size) {
8079 		DHD_ERROR(("%s: fifo_size is 0\n", __FUNCTION__));
8080 		return BCME_ERROR;
8081 	}
8082 
8083 	/* Check if vasip clk is disabled, if yes enable it */
8084 	addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
8085 	dhd_sbreg_op(dhd, addr, &val, TRUE);
8086 	if (!val) {
8087 		val = 1;
8088 		dhd_sbreg_op(dhd, addr, &val, FALSE);
8089 	}
8090 
8091 	addr = addr_reg;
8092 	/* Read 4 bytes at once and loop for fifo_size / 4 */
8093 	for (i = 0; i < fifo_size / 4; i++, addr += 4) {
8094 		si_backplane_access(dhd->bus->sih, addr, sizeof(uint), &val, TRUE);
8095 		buf[i] = val;
8096 		OSL_DELAY(1);
8097 	}
8098 	return BCME_OK;
8099 }
8100 
8101 static int
dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t * dhd)8102 dhdpcie_resume_chipcommon_powerctrl(dhd_pub_t *dhd)
8103 {
8104 	uint addr;
8105 	uint val;
8106 
8107 	DHD_ERROR(("%s\n", __FUNCTION__));
8108 
8109 	/* conditionally clear bits [11:8] of PowerCtrl */
8110 	addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
8111 	dhd_sbreg_op(dhd, addr, &val, TRUE);
8112 	if (!(val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask)) {
8113 		addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
8114 		val = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask;
8115 		dhd_sbreg_op(dhd, addr, &val, FALSE);
8116 	}
8117 	return BCME_OK;
8118 }
8119 
8120 static int
dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t * dhd)8121 dhdpcie_suspend_chipcommon_powerctrl(dhd_pub_t *dhd)
8122 {
8123 	uint addr;
8124 	uint val;
8125 
8126 	DHD_ERROR(("%s\n", __FUNCTION__));
8127 
8128 	/* conditionally clear bits [11:8] of PowerCtrl */
8129 	addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
8130 	dhd_sbreg_op(dhd, addr, &val, TRUE);
8131 	if (val & dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl_mask) {
8132 		addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.powerctrl;
8133 		val = 0;
8134 		dhd_sbreg_op(dhd, addr, &val, FALSE);
8135 	}
8136 	return BCME_OK;
8137 }
8138 
8139 static int
dhdpcie_clear_intmask_and_timer(dhd_pub_t * dhd)8140 dhdpcie_clear_intmask_and_timer(dhd_pub_t *dhd)
8141 {
8142 	uint addr;
8143 	uint val;
8144 
8145 	DHD_ERROR(("%s\n", __FUNCTION__));
8146 
8147 	/* clear chipcommon intmask */
8148 	addr = dhd->sssr_reg_info.chipcommon_regs.base_regs.intmask;
8149 	val = 0x0;
8150 	dhd_sbreg_op(dhd, addr, &val, FALSE);
8151 
8152 	/* clear PMUIntMask0 */
8153 	addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask0;
8154 	val = 0x0;
8155 	dhd_sbreg_op(dhd, addr, &val, FALSE);
8156 
8157 	/* clear PMUIntMask1 */
8158 	addr = dhd->sssr_reg_info.pmu_regs.base_regs.pmuintmask1;
8159 	val = 0x0;
8160 	dhd_sbreg_op(dhd, addr, &val, FALSE);
8161 
8162 	/* clear res_req_timer */
8163 	addr = dhd->sssr_reg_info.pmu_regs.base_regs.resreqtimer;
8164 	val = 0x0;
8165 	dhd_sbreg_op(dhd, addr, &val, FALSE);
8166 
8167 	/* clear macresreqtimer */
8168 	addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer;
8169 	val = 0x0;
8170 	dhd_sbreg_op(dhd, addr, &val, FALSE);
8171 
8172 	/* clear macresreqtimer1 */
8173 	addr = dhd->sssr_reg_info.pmu_regs.base_regs.macresreqtimer1;
8174 	val = 0x0;
8175 	dhd_sbreg_op(dhd, addr, &val, FALSE);
8176 
8177 	/* clear VasipClkEn */
8178 	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
8179 		addr = dhd->sssr_reg_info.vasip_regs.wrapper_regs.ioctrl;
8180 		val = 0x0;
8181 		dhd_sbreg_op(dhd, addr, &val, FALSE);
8182 	}
8183 
8184 	return BCME_OK;
8185 }
8186 
8187 static int
dhdpcie_d11_check_outofreset(dhd_pub_t * dhd)8188 dhdpcie_d11_check_outofreset(dhd_pub_t *dhd)
8189 {
8190 	int i;
8191 	uint addr;
8192 	uint val = 0;
8193 
8194 	DHD_ERROR(("%s\n", __FUNCTION__));
8195 
8196 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
8197 		/* Check if bit 0 of resetctrl is cleared */
8198 		addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
8199 		dhd_sbreg_op(dhd, addr, &val, TRUE);
8200 		if (!(val & 1)) {
8201 			dhd->sssr_d11_outofreset[i] = TRUE;
8202 		} else {
8203 			dhd->sssr_d11_outofreset[i] = FALSE;
8204 		}
8205 		DHD_ERROR(("%s: sssr_d11_outofreset[%d] : %d\n",
8206 			__FUNCTION__, i, dhd->sssr_d11_outofreset[i]));
8207 	}
8208 	return BCME_OK;
8209 }
8210 
8211 static int
dhdpcie_d11_clear_clk_req(dhd_pub_t * dhd)8212 dhdpcie_d11_clear_clk_req(dhd_pub_t *dhd)
8213 {
8214 	int i;
8215 	uint addr;
8216 	uint val = 0;
8217 
8218 	DHD_ERROR(("%s\n", __FUNCTION__));
8219 
8220 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
8221 		if (dhd->sssr_d11_outofreset[i]) {
8222 			/* clear request clk only if itopoobb is non zero */
8223 			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.itopoobb;
8224 			dhd_sbreg_op(dhd, addr, &val, TRUE);
8225 			if (val != 0) {
8226 				/* clear clockcontrolstatus */
8227 				addr = dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus;
8228 				val =
8229 				dhd->sssr_reg_info.mac_regs[i].base_regs.clockcontrolstatus_val;
8230 				dhd_sbreg_op(dhd, addr, &val, FALSE);
8231 			}
8232 		}
8233 	}
8234 	return BCME_OK;
8235 }
8236 
8237 static int
dhdpcie_arm_clear_clk_req(dhd_pub_t * dhd)8238 dhdpcie_arm_clear_clk_req(dhd_pub_t *dhd)
8239 {
8240 	uint addr;
8241 	uint val = 0;
8242 
8243 	DHD_ERROR(("%s\n", __FUNCTION__));
8244 
8245 	/* Check if bit 0 of resetctrl is cleared */
8246 	addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.resetctrl;
8247 	dhd_sbreg_op(dhd, addr, &val, TRUE);
8248 	if (!(val & 1)) {
8249 		/* clear request clk only if itopoobb is non zero */
8250 		addr = dhd->sssr_reg_info.arm_regs.wrapper_regs.itopoobb;
8251 		dhd_sbreg_op(dhd, addr, &val, TRUE);
8252 		if (val != 0) {
8253 			/* clear clockcontrolstatus */
8254 			addr = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus;
8255 			val = dhd->sssr_reg_info.arm_regs.base_regs.clockcontrolstatus_val;
8256 			dhd_sbreg_op(dhd, addr, &val, FALSE);
8257 		}
8258 	}
8259 	return BCME_OK;
8260 }
8261 
8262 static int
dhdpcie_pcie_clear_clk_req(dhd_pub_t * dhd)8263 dhdpcie_pcie_clear_clk_req(dhd_pub_t *dhd)
8264 {
8265 	uint addr;
8266 	uint val = 0;
8267 
8268 	DHD_ERROR(("%s\n", __FUNCTION__));
8269 
8270 	/* clear request clk only if itopoobb is non zero */
8271 	addr = dhd->sssr_reg_info.pcie_regs.wrapper_regs.itopoobb;
8272 	dhd_sbreg_op(dhd, addr, &val, TRUE);
8273 	if (val) {
8274 		/* clear clockcontrolstatus */
8275 		addr = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus;
8276 		val = dhd->sssr_reg_info.pcie_regs.base_regs.clockcontrolstatus_val;
8277 		dhd_sbreg_op(dhd, addr, &val, FALSE);
8278 	}
8279 	return BCME_OK;
8280 }
8281 
8282 static int
dhdpcie_pcie_send_ltrsleep(dhd_pub_t * dhd)8283 dhdpcie_pcie_send_ltrsleep(dhd_pub_t *dhd)
8284 {
8285 	uint addr;
8286 	uint val = 0;
8287 
8288 	DHD_ERROR(("%s\n", __FUNCTION__));
8289 
8290 	addr = dhd->sssr_reg_info.pcie_regs.base_regs.ltrstate;
8291 	val = LTR_ACTIVE;
8292 	dhd_sbreg_op(dhd, addr, &val, FALSE);
8293 
8294 	val = LTR_SLEEP;
8295 	dhd_sbreg_op(dhd, addr, &val, FALSE);
8296 
8297 	return BCME_OK;
8298 }
8299 
8300 static int
dhdpcie_clear_clk_req(dhd_pub_t * dhd)8301 dhdpcie_clear_clk_req(dhd_pub_t *dhd)
8302 {
8303 	DHD_ERROR(("%s\n", __FUNCTION__));
8304 
8305 	dhdpcie_arm_clear_clk_req(dhd);
8306 
8307 	dhdpcie_d11_clear_clk_req(dhd);
8308 
8309 	dhdpcie_pcie_clear_clk_req(dhd);
8310 
8311 	return BCME_OK;
8312 }
8313 
8314 static int
dhdpcie_bring_d11_outofreset(dhd_pub_t * dhd)8315 dhdpcie_bring_d11_outofreset(dhd_pub_t *dhd)
8316 {
8317 	int i;
8318 	uint addr;
8319 	uint val = 0;
8320 
8321 	DHD_ERROR(("%s\n", __FUNCTION__));
8322 
8323 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
8324 		if (dhd->sssr_d11_outofreset[i]) {
8325 			/* disable core by setting bit 0 */
8326 			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
8327 			val = 1;
8328 			dhd_sbreg_op(dhd, addr, &val, FALSE);
8329 			OSL_DELAY(6000);
8330 
8331 			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
8332 			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[0];
8333 			dhd_sbreg_op(dhd, addr, &val, FALSE);
8334 
8335 			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[1];
8336 			dhd_sbreg_op(dhd, addr, &val, FALSE);
8337 
8338 			/* enable core by clearing bit 0 */
8339 			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.resetctrl;
8340 			val = 0;
8341 			dhd_sbreg_op(dhd, addr, &val, FALSE);
8342 
8343 			addr = dhd->sssr_reg_info.mac_regs[i].wrapper_regs.ioctrl;
8344 			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[2];
8345 			dhd_sbreg_op(dhd, addr, &val, FALSE);
8346 
8347 			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[3];
8348 			dhd_sbreg_op(dhd, addr, &val, FALSE);
8349 
8350 			val = dhd->sssr_reg_info.mac_regs[0].wrapper_regs.ioctrl_resetseq_val[4];
8351 			dhd_sbreg_op(dhd, addr, &val, FALSE);
8352 		}
8353 	}
8354 	return BCME_OK;
8355 }
8356 
8357 static int
dhdpcie_sssr_dump_get_before_sr(dhd_pub_t * dhd)8358 dhdpcie_sssr_dump_get_before_sr(dhd_pub_t *dhd)
8359 {
8360 	int i;
8361 
8362 	DHD_ERROR(("%s\n", __FUNCTION__));
8363 
8364 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
8365 		if (dhd->sssr_d11_outofreset[i]) {
8366 			dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_before[i],
8367 				dhd->sssr_reg_info.mac_regs[i].sr_size,
8368 				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
8369 				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
8370 		}
8371 	}
8372 
8373 	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
8374 		dhdpcie_get_sssr_vasip_dump(dhd, dhd->sssr_vasip_buf_before,
8375 			dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
8376 			dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
8377 	}
8378 
8379 	return BCME_OK;
8380 }
8381 
8382 static int
dhdpcie_sssr_dump_get_after_sr(dhd_pub_t * dhd)8383 dhdpcie_sssr_dump_get_after_sr(dhd_pub_t *dhd)
8384 {
8385 	int i;
8386 
8387 	DHD_ERROR(("%s\n", __FUNCTION__));
8388 
8389 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
8390 		if (dhd->sssr_d11_outofreset[i]) {
8391 			dhdpcie_get_sssr_fifo_dump(dhd, dhd->sssr_d11_after[i],
8392 				dhd->sssr_reg_info.mac_regs[i].sr_size,
8393 				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtaddress,
8394 				dhd->sssr_reg_info.mac_regs[i].base_regs.xmtdata);
8395 		}
8396 	}
8397 
8398 	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
8399 		dhdpcie_get_sssr_vasip_dump(dhd, dhd->sssr_vasip_buf_after,
8400 			dhd->sssr_reg_info.vasip_regs.vasip_sr_size,
8401 			dhd->sssr_reg_info.vasip_regs.vasip_sr_addr);
8402 	}
8403 
8404 	return BCME_OK;
8405 }
8406 
8407 int
dhdpcie_sssr_dump(dhd_pub_t * dhd)8408 dhdpcie_sssr_dump(dhd_pub_t *dhd)
8409 {
8410 	if (!dhd->sssr_inited) {
8411 		DHD_ERROR(("%s: SSSR not inited\n", __FUNCTION__));
8412 		return BCME_ERROR;
8413 	}
8414 
8415 	if (dhd->bus->is_linkdown) {
8416 		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
8417 		return BCME_ERROR;
8418 	}
8419 
8420 	dhdpcie_d11_check_outofreset(dhd);
8421 
8422 	DHD_ERROR(("%s: Collecting Dump before SR\n", __FUNCTION__));
8423 	if (dhdpcie_sssr_dump_get_before_sr(dhd) != BCME_OK) {
8424 		DHD_ERROR(("%s: dhdpcie_sssr_dump_get_before_sr failed\n", __FUNCTION__));
8425 		return BCME_ERROR;
8426 	}
8427 
8428 	dhdpcie_clear_intmask_and_timer(dhd);
8429 	dhdpcie_suspend_chipcommon_powerctrl(dhd);
8430 	dhdpcie_clear_clk_req(dhd);
8431 	dhdpcie_pcie_send_ltrsleep(dhd);
8432 
8433 	/* Wait for some time before Restore */
8434 	OSL_DELAY(6000);
8435 
8436 	dhdpcie_resume_chipcommon_powerctrl(dhd);
8437 	dhdpcie_bring_d11_outofreset(dhd);
8438 
8439 	DHD_ERROR(("%s: Collecting Dump after SR\n", __FUNCTION__));
8440 	if (dhdpcie_sssr_dump_get_after_sr(dhd) != BCME_OK) {
8441 		DHD_ERROR(("%s: dhdpcie_sssr_dump_get_after_sr failed\n", __FUNCTION__));
8442 		return BCME_ERROR;
8443 	}
8444 
8445 	dhd_schedule_sssr_dump(dhd);
8446 
8447 	return BCME_OK;
8448 }
8449 #endif /* DHD_SSSR_DUMP */
8450 
8451 #ifdef DHD_WAKE_STATUS
8452 wake_counts_t*
dhd_bus_get_wakecount(dhd_pub_t * dhd)8453 dhd_bus_get_wakecount(dhd_pub_t *dhd)
8454 {
8455 	if (!dhd->bus) {
8456 		return NULL;
8457 	}
8458 	return &dhd->bus->wake_counts;
8459 }
8460 int
dhd_bus_get_bus_wake(dhd_pub_t * dhd)8461 dhd_bus_get_bus_wake(dhd_pub_t *dhd)
8462 {
8463 	return bcmpcie_set_get_wake(dhd->bus, 0);
8464 }
8465 #endif /* DHD_WAKE_STATUS */
8466 
8467 #ifdef BCM_ASLR_HEAP
8468 /* Writes random number(s) to the TCM. FW upon initialization reads the metadata
8469  * of the random number and then based on metadata, reads the random number from the TCM.
8470  */
8471 static void
dhdpcie_wrt_rnd(struct dhd_bus * bus)8472 dhdpcie_wrt_rnd(struct dhd_bus *bus)
8473 {
8474 	bcm_rand_metadata_t rnd_data;
8475 	uint32 rand_no;
8476 	uint32 count = 1;	/* start with 1 random number */
8477 
8478 	uint32 addr = bus->dongle_ram_base + (bus->ramsize - BCM_NVRAM_OFFSET_TCM) -
8479 		((bus->nvram_csm & 0xffff)* BCM_NVRAM_IMG_COMPRS_FACTOR + sizeof(rnd_data));
8480 	rnd_data.signature = htol32(BCM_RNG_SIGNATURE);
8481 	rnd_data.count = htol32(count);
8482 	/* write the metadata about random number */
8483 	dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rnd_data, sizeof(rnd_data));
8484 	/* scale back by number of random number counts */
8485 	addr -= sizeof(count) * count;
8486 	/* Now write the random number(s) */
8487 	rand_no = htol32(dhd_get_random_number());
8488 	dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&rand_no, sizeof(rand_no));
8489 }
8490 #endif /* BCM_ASLR_HEAP */
8491