1 /*
2 * Linux DHD Bus Module for PCIE
3 *
4 * Copyright (C) 2020, Broadcom.
5 *
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
11 *
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
19 *
20 *
21 * <<Broadcom-WL-IPTag/Open:>>
22 *
23 * $Id$
24 */
25
26 /* include files */
27 #include <typedefs.h>
28 #include <bcmutils.h>
29 #include <bcmdevs.h>
30 #include <bcmdevs_legacy.h> /* need to still support chips no longer in trunk firmware */
31 #include <siutils.h>
32 #include <hndsoc.h>
33 #include <hndpmu.h>
34 #include <sbchipc.h>
35 #if defined(DHD_DEBUG)
36 #include <hnd_armtrap.h>
37 #include <hnd_cons.h>
38 #endif /* defined(DHD_DEBUG) */
39 #include <dngl_stats.h>
40 #include <pcie_core.h>
41 #include <dhd.h>
42 #include <dhd_bus.h>
43 #include <dhd_proto.h>
44 #include <dhd_dbg.h>
45 #include <dhdioctl.h>
46 #include <bcmmsgbuf.h>
47 #include <pcicfg.h>
48 #include <dhd_pcie.h>
49 #include <dhd_linux.h>
50 #ifdef CUSTOMER_HW_ROCKCHIP
51 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
52 #include <rk_dhd_pcie_linux.h>
53 #endif
54 #endif /* CUSTOMER_HW_ROCKCHIP */
55 #ifdef OEM_ANDROID
56 #ifdef CONFIG_ARCH_MSM
57 #if defined(CONFIG_PCI_MSM) || defined(CONFIG_ARCH_MSM8996)
58 #include <linux/msm_pcie.h>
59 #else
60 #include <mach/msm_pcie.h>
61 #endif /* CONFIG_PCI_MSM || CONFIG_ARCH_MSM8996 */
62 #endif /* CONFIG_ARCH_MSM */
63 #endif /* OEM_ANDROID */
64
65 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
66 #include <linux/pm_runtime.h>
67 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
68
69 #if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
70 defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_EXYNOS2100) || \
71 defined(CONFIG_SOC_EXYNOS1000) || defined(CONFIG_SOC_GS101)
72 #include <linux/exynos-pci-ctrl.h>
73 #endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 ||
74 * CONFIG_SOC_EXYNOS9830 || CONFIG_SOC_EXYNOS2100 ||
75 * CONFIG_SOC_EXYNOS1000 || CONFIG_SOC_GS101
76 */
77
78 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
79 #ifndef AUTO_SUSPEND_TIMEOUT
80 #define AUTO_SUSPEND_TIMEOUT 1000
81 #endif /* AUTO_SUSPEND_TIMEOUT */
82 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
83
84 #ifdef DHD_PCIE_RUNTIMEPM
85 #define RPM_WAKE_UP_TIMEOUT 10000 /* ms */
86 #endif /* DHD_PCIE_RUNTIMEPM */
87
88 #include <linux/irq.h>
89 #ifdef USE_SMMU_ARCH_MSM
90 #include <asm/dma-iommu.h>
91 #include <linux/iommu.h>
92 #include <linux/of.h>
93 #include <linux/platform_device.h>
94 #endif /* USE_SMMU_ARCH_MSM */
95 #include <dhd_config.h>
96
97 #ifdef PCIE_OOB
98 #include "ftdi_sio_external.h"
99 #endif /* PCIE_OOB */
100
101 #define PCI_CFG_RETRY 10 /* PR15065: retry count for pci cfg accesses */
102 #define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
103 #define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
104
105 #ifdef PCIE_OOB
106 #define HOST_WAKE 4 /* GPIO_0 (HOST_WAKE) - Output from WLAN */
107 #define DEVICE_WAKE 5 /* GPIO_1 (DEVICE_WAKE) - Input to WLAN */
108 #define BIT_WL_REG_ON 6
109 #define BIT_BT_REG_ON 7
110
111 int gpio_handle_val = 0;
112 unsigned char gpio_port = 0;
113 unsigned char gpio_direction = 0;
114 #define OOB_PORT "ttyUSB0"
115 #endif /* PCIE_OOB */
116
117 #ifndef BCMPCI_DEV_ID
118 #define BCMPCI_DEV_ID PCI_ANY_ID
119 #endif
120
121 #ifdef FORCE_TPOWERON
122 extern uint32 tpoweron_scale;
123 #endif /* FORCE_TPOWERON */
124 /* user defined data structures */
125
126 typedef bool (*dhdpcie_cb_fn_t)(void *);
127
128 typedef struct dhdpcie_info
129 {
130 dhd_bus_t *bus;
131 osl_t *osh;
132 struct pci_dev *dev; /* pci device handle */
133 volatile char *regs; /* pci device memory va */
134 volatile char *tcm; /* pci device memory va */
135 uint32 bar1_size; /* pci device memory size */
136 struct pcos_info *pcos_info;
137 uint16 last_intrstatus; /* to cache intrstatus */
138 int irq;
139 char pciname[32];
140 struct pci_saved_state* default_state;
141 struct pci_saved_state* state;
142 #ifdef BCMPCIE_OOB_HOST_WAKE
143 void *os_cxt; /* Pointer to per-OS private data */
144 #endif /* BCMPCIE_OOB_HOST_WAKE */
145 #ifdef DHD_WAKE_STATUS
146 spinlock_t pkt_wake_lock;
147 unsigned int total_wake_count;
148 int pkt_wake;
149 int wake_irq;
150 #endif /* DHD_WAKE_STATUS */
151 #ifdef USE_SMMU_ARCH_MSM
152 void *smmu_cxt;
153 #endif /* USE_SMMU_ARCH_MSM */
154 } dhdpcie_info_t;
155
156 struct pcos_info {
157 dhdpcie_info_t *pc;
158 spinlock_t lock;
159 wait_queue_head_t intr_wait_queue;
160 timer_list_compat_t tuning_timer;
161 int tuning_timer_exp;
162 atomic_t timer_enab;
163 struct tasklet_struct tuning_tasklet;
164 };
165
166 #ifdef BCMPCIE_OOB_HOST_WAKE
167 typedef struct dhdpcie_os_info {
168 int oob_irq_num; /* valid when hardware or software oob in use */
169 unsigned long oob_irq_flags; /* valid when hardware or software oob in use */
170 bool oob_irq_registered;
171 bool oob_irq_enabled;
172 bool oob_irq_wake_enabled;
173 spinlock_t oob_irq_spinlock;
174 void *dev; /* handle to the underlying device */
175 } dhdpcie_os_info_t;
176 static irqreturn_t wlan_oob_irq(int irq, void *data);
177 #ifdef CUSTOMER_HW2
178 extern struct brcm_pcie_wake brcm_pcie_wake;
179 #endif /* CUSTOMER_HW2 */
180 #endif /* BCMPCIE_OOB_HOST_WAKE */
181
182 #ifdef USE_SMMU_ARCH_MSM
183 typedef struct dhdpcie_smmu_info {
184 struct dma_iommu_mapping *smmu_mapping;
185 dma_addr_t smmu_iova_start;
186 size_t smmu_iova_len;
187 } dhdpcie_smmu_info_t;
188 #endif /* USE_SMMU_ARCH_MSM */
189
190 /* function declarations */
191 static int __devinit
192 dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
193 static void __devexit
194 dhdpcie_pci_remove(struct pci_dev *pdev);
195 static int dhdpcie_init(struct pci_dev *pdev);
196 static irqreturn_t dhdpcie_isr(int irq, void *arg);
197 /* OS Routine functions for PCI suspend/resume */
198
199 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
200 static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state, bool byint);
201 #else
202 static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state);
203 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
204 static int dhdpcie_resume_host_dev(dhd_bus_t *bus);
205 static int dhdpcie_suspend_host_dev(dhd_bus_t *bus);
206 static int dhdpcie_resume_dev(struct pci_dev *dev);
207 static int dhdpcie_suspend_dev(struct pci_dev *dev);
208 #ifdef DHD_PCIE_RUNTIMEPM
209 static int dhdpcie_pm_suspend(struct device *dev);
210 static int dhdpcie_pm_prepare(struct device *dev);
211 static int dhdpcie_pm_resume(struct device *dev);
212 static void dhdpcie_pm_complete(struct device *dev);
213 #else
214 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
215 static int dhdpcie_pm_system_suspend_noirq(struct device * dev);
216 static int dhdpcie_pm_system_resume_noirq(struct device * dev);
217 #else
218 static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state);
219 static int dhdpcie_pci_resume(struct pci_dev *dev);
220 #if defined(BT_OVER_PCIE)
221 static int dhdpcie_pci_resume_early(struct pci_dev *dev);
222 #endif /* BT_OVER_PCIE */
223 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
224 #endif /* DHD_PCIE_RUNTIMEPM */
225
226 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
227 static int dhdpcie_pm_runtime_suspend(struct device * dev);
228 static int dhdpcie_pm_runtime_resume(struct device * dev);
229 static int dhdpcie_pm_system_suspend_noirq(struct device * dev);
230 static int dhdpcie_pm_system_resume_noirq(struct device * dev);
231 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
232
233 #ifdef SUPPORT_EXYNOS7420
234 void exynos_pcie_pm_suspend(int ch_num) {}
235 void exynos_pcie_pm_resume(int ch_num) {}
236 #endif /* SUPPORT_EXYNOS7420 */
237
238 static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state);
239
240 uint32
241 dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write,
242 uint32 writeval);
243
244 static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = {
245 { vendor: VENDOR_BROADCOM,
246 device: BCMPCI_DEV_ID,
247 subvendor: PCI_ANY_ID,
248 subdevice: PCI_ANY_ID,
249 class: PCI_CLASS_NETWORK_OTHER << 8,
250 class_mask: 0xffff00,
251 driver_data: 0,
252 },
253 { 0, 0, 0, 0, 0, 0, 0}
254 };
255 MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid);
256
257 /* Power Management Hooks */
258 #ifdef DHD_PCIE_RUNTIMEPM
259 static const struct dev_pm_ops dhd_pcie_pm_ops = {
260 .prepare = dhdpcie_pm_prepare,
261 .suspend = dhdpcie_pm_suspend,
262 .resume = dhdpcie_pm_resume,
263 .complete = dhdpcie_pm_complete,
264 };
265 #endif /* DHD_PCIE_RUNTIMEPM */
266 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
267 static const struct dev_pm_ops dhd_pcie_pm_ops = {
268 SET_RUNTIME_PM_OPS(dhdpcie_pm_runtime_suspend, dhdpcie_pm_runtime_resume, NULL)
269 .suspend_noirq = dhdpcie_pm_system_suspend_noirq,
270 .resume_noirq = dhdpcie_pm_system_resume_noirq
271 };
272 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
273
274 static struct pci_driver dhdpcie_driver = {
275 node: {&dhdpcie_driver.node, &dhdpcie_driver.node},
276 name: "pcieh"BUS_TYPE,
277 id_table: dhdpcie_pci_devid,
278 probe: dhdpcie_pci_probe,
279 remove: dhdpcie_pci_remove,
280 #if defined (DHD_PCIE_RUNTIMEPM) || defined (DHD_PCIE_NATIVE_RUNTIMEPM)
281 .driver.pm = &dhd_pcie_pm_ops,
282 #else
283 suspend: dhdpcie_pci_suspend,
284 resume: dhdpcie_pci_resume,
285 #if defined(BT_OVER_PCIE)
286 resume_early: dhdpcie_pci_resume_early,
287 #endif /* BT_OVER_PCIE */
288 #endif /* DHD_PCIE_RUNTIMEPM || DHD_PCIE_NATIVE_RUNTIMEPM */
289 };
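/*
 * Note: the id table and pci_driver above use the legacy GCC "label:"
 * initializer syntax. A minimal sketch of the equivalent C99
 * designated-initializer form (illustrative only, not part of the build):
 *
 *	static struct pci_driver dhdpcie_driver = {
 *		.name		= "pcieh" BUS_TYPE,
 *		.id_table	= dhdpcie_pci_devid,
 *		.probe		= dhdpcie_pci_probe,
 *		.remove		= dhdpcie_pci_remove,
 *	};
 */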
290
291 int dhdpcie_init_succeeded = FALSE;
292
293 #ifdef USE_SMMU_ARCH_MSM
294 static int dhdpcie_smmu_init(struct pci_dev *pdev, void *smmu_cxt)
295 {
296 struct dma_iommu_mapping *mapping;
297 struct device_node *root_node = NULL;
298 dhdpcie_smmu_info_t *smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt;
299 int smmu_iova_address[2];
300 char *wlan_node = "android,bcmdhd_wlan";
301 char *wlan_smmu_node = "wlan-smmu-iova-address";
302 int atomic_ctx = 1;
303 int s1_bypass = 1;
304 int ret = 0;
305
306 DHD_ERROR(("%s: SMMU initialize\n", __FUNCTION__));
307
308 root_node = of_find_compatible_node(NULL, NULL, wlan_node);
309 if (!root_node) {
310 WARN(1, "failed to get device node of BRCM WLAN\n");
311 return -ENODEV;
312 }
313
314 if (of_property_read_u32_array(root_node, wlan_smmu_node,
315 smmu_iova_address, 2) == 0) {
316 DHD_ERROR(("%s : get SMMU start address 0x%x, size 0x%x\n",
317 __FUNCTION__, smmu_iova_address[0], smmu_iova_address[1]));
318 smmu_info->smmu_iova_start = smmu_iova_address[0];
319 smmu_info->smmu_iova_len = smmu_iova_address[1];
320 } else {
321 printf("%s : can't get smmu iova address property\n",
322 __FUNCTION__);
323 return -ENODEV;
324 }
325
326 if (smmu_info->smmu_iova_len <= 0) {
327 DHD_ERROR(("%s: Invalid smmu iova len %d\n",
328 __FUNCTION__, (int)smmu_info->smmu_iova_len));
329 return -EINVAL;
330 }
331
332 DHD_ERROR(("%s : SMMU init start\n", __FUNCTION__));
333
334 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) ||
335 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
336 DHD_ERROR(("%s: DMA set 64bit mask failed.\n", __FUNCTION__));
337 return -EINVAL;
338 }
339
340 mapping = arm_iommu_create_mapping(&platform_bus_type,
341 smmu_info->smmu_iova_start, smmu_info->smmu_iova_len);
342 if (IS_ERR(mapping)) {
343 ret = PTR_ERR(mapping);
344 DHD_ERROR(("%s: create mapping failed, err = %d\n",
345 __FUNCTION__, ret));
346 goto map_fail;
347 }
348
349 ret = iommu_domain_set_attr(mapping->domain,
350 DOMAIN_ATTR_ATOMIC, &atomic_ctx);
351 if (ret) {
352 DHD_ERROR(("%s: set atomic_ctx attribute failed, err = %d\n",
353 __FUNCTION__, ret));
354 goto set_attr_fail;
355 }
356
357 ret = iommu_domain_set_attr(mapping->domain,
358 DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
359 if (ret < 0) {
360 DHD_ERROR(("%s: set s1_bypass attribute failed, err = %d\n",
361 __FUNCTION__, ret));
362 goto set_attr_fail;
363 }
364
365 ret = arm_iommu_attach_device(&pdev->dev, mapping);
366 if (ret) {
367 DHD_ERROR(("%s: attach device failed, err = %d\n",
368 __FUNCTION__, ret));
369 goto attach_fail;
370 }
371
372 smmu_info->smmu_mapping = mapping;
373
374 return ret;
375
376 attach_fail:
377 set_attr_fail:
378 arm_iommu_release_mapping(mapping);
379 map_fail:
380 return ret;
381 }
382
383 static void dhdpcie_smmu_remove(struct pci_dev *pdev, void *smmu_cxt)
384 {
385 dhdpcie_smmu_info_t *smmu_info;
386
387 if (!smmu_cxt) {
388 return;
389 }
390
391 smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt;
392 if (smmu_info->smmu_mapping) {
393 arm_iommu_detach_device(&pdev->dev);
394 arm_iommu_release_mapping(smmu_info->smmu_mapping);
395 smmu_info->smmu_mapping = NULL;
396 }
397 }
398 #endif /* USE_SMMU_ARCH_MSM */
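/*
 * Illustrative sketch (not compiled): the expected SMMU lifecycle around the
 * helpers above, assuming pch->smmu_cxt was allocated by the probe path:
 *
 *	if (dhdpcie_smmu_init(pdev, pch->smmu_cxt) < 0)	// map IOVA window + attach
 *		goto fail;
 *	...						// normal DMA through the SMMU
 *	dhdpcie_smmu_remove(pdev, pch->smmu_cxt);	// detach + release mapping
 */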
399
400 #ifdef FORCE_TPOWERON
401 static void
402 dhd_bus_get_tpoweron(dhd_bus_t *bus)
403 {
404
405 uint32 tpoweron_rc;
406 uint32 tpoweron_ep;
407
408 tpoweron_rc = dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
409 PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, FALSE, 0);
410 tpoweron_ep = dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
411 PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, FALSE, 0);
412 DHD_ERROR(("%s: tpoweron_rc:0x%x tpoweron_ep:0x%x\n",
413 __FUNCTION__, tpoweron_rc, tpoweron_ep));
414 }
415
416 static void
417 dhd_bus_set_tpoweron(dhd_bus_t *bus, uint16 tpoweron)
418 {
419
420 dhd_bus_get_tpoweron(bus);
421 /* Set the tpoweron */
422 DHD_ERROR(("%s tpoweron: 0x%x\n", __FUNCTION__, tpoweron));
423 dhdpcie_rc_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
424 PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, TRUE, tpoweron);
425 dhdpcie_ep_access_cap(bus, PCIE_EXTCAP_ID_L1SS,
426 PCIE_EXTCAP_L1SS_CONTROL2_OFFSET, TRUE, TRUE, tpoweron);
427
428 dhd_bus_get_tpoweron(bus);
429
430 }
431
432 static bool
433 dhdpcie_chip_req_forced_tpoweron(dhd_bus_t *bus)
434 {
435 /*
436 * On Fire's reference platform, coming out of L1.2,
437 * there is a constant delay of 45us between CLKREQ# and stable REFCLK.
438 * Due to this delay, with tPowerOn < 50us
439 * there is a chance that the refclk sense triggers on noise.
440 *
441 * Whichever chip needs a forced tPowerOn of 50us should be listed below.
442 */
443 if (si_chipid(bus->sih) == BCM4377_CHIP_ID) {
444 return TRUE;
445 }
446 return FALSE;
447 }
448 #endif /* FORCE_TPOWERON */
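/*
 * For reference: in the PCIe L1SS Control 2 register, T_PowerOn is a 5-bit
 * value (bits 7:3) multiplied by a 2-bit scale (bits 1:0; 2us/10us/100us
 * units). A hedged sketch of composing a 50us setting by hand (illustrative
 * only; the driver actually passes the pre-encoded tpoweron_scale value):
 *
 *	uint16 tpoweron = (5 << 3) | 1;		// 5 x 10us = 50us
 *	dhd_bus_set_tpoweron(bus, tpoweron);
 */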
449
450 #ifdef BT_OVER_PCIE
451 int dhd_bus_pwr_off(dhd_pub_t *dhdp, int reason)
452 {
453 DHD_ERROR(("%s: WARNING ! function not implemented in %s\n",
454 __FUNCTION__, __FILE__));
455 return BCME_OK;
456 }
457
458 int dhd_bus_pwr_on(dhd_pub_t *dhdp, int reason)
459 {
460 DHD_ERROR(("%s: WARNING ! function not implemented in %s\n",
461 __FUNCTION__, __FILE__));
462 return BCME_OK;
463 }
464
465 int dhd_bus_pwr_toggle(dhd_pub_t *dhdp, int reason)
466 {
467 DHD_ERROR(("%s: WARNING ! function not implemented in %s\n",
468 __FUNCTION__, __FILE__));
469 return BCME_OK;
470 }
471
472 bool dhdpcie_is_btop_chip(struct dhd_bus *bus)
473 {
474 DHD_ERROR(("%s: WARNING ! function not implemented in %s\n",
475 __FUNCTION__, __FILE__));
476 return FALSE;
477 }
478
479 int dhdpcie_redownload_fw(dhd_pub_t *dhdp)
480 {
481 DHD_ERROR(("%s: WARNING ! function not implemented in %s\n",
482 __FUNCTION__, __FILE__));
483 return BCME_OK;
484 }
485 #endif /* BT_OVER_PCIE */
486
487 static bool
488 dhd_bus_aspm_enable_dev(dhd_bus_t *bus, struct pci_dev *dev, bool enable)
489 {
490 uint32 linkctrl_before;
491 uint32 linkctrl_after = 0;
492 uint8 linkctrl_asm;
493 char *device;
494
495 device = (dev == bus->dev) ? "EP" : "RC";
496
497 linkctrl_before = dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
498 FALSE, FALSE, 0);
499 linkctrl_asm = (linkctrl_before & PCIE_ASPM_CTRL_MASK);
500
501 if (enable) {
502 if (linkctrl_asm == PCIE_ASPM_L1_ENAB) {
503 DHD_ERROR(("%s: %s already enabled linkctrl: 0x%x\n",
504 __FUNCTION__, device, linkctrl_before));
505 return FALSE;
506 }
507 /* Enable only L1 ASPM (bit 1) */
508 dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
509 TRUE, (linkctrl_before | PCIE_ASPM_L1_ENAB));
510 } else {
511 if (linkctrl_asm == 0) {
512 DHD_ERROR(("%s: %s already disabled linkctrl: 0x%x\n",
513 __FUNCTION__, device, linkctrl_before));
514 return FALSE;
515 }
516 /* Disable complete ASPM (bit 1 and bit 0) */
517 dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET, FALSE,
518 TRUE, (linkctrl_before & (~PCIE_ASPM_ENAB)));
519 }
520
521 linkctrl_after = dhdpcie_access_cap(dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
522 FALSE, FALSE, 0);
523 DHD_ERROR(("%s: %s %s, linkctrl_before: 0x%x linkctrl_after: 0x%x\n",
524 __FUNCTION__, device, (enable ? "ENABLE " : "DISABLE"),
525 linkctrl_before, linkctrl_after));
526
527 return TRUE;
528 }
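/*
 * For reference: in the PCIe Link Control register, ASPM control is the low
 * two bits (bit 0 = L0s, bit 1 = L1), which is why the enable path ORs in
 * only PCIE_ASPM_L1_ENAB while the disable path clears the full
 * PCIE_ASPM_ENAB mask. Hedged usage sketch (illustrative only):
 *
 *	dhd_bus_aspm_enable_dev(bus, bus->rc_dev, TRUE);	// RC: L1 on
 *	dhd_bus_aspm_enable_dev(bus, bus->dev, TRUE);		// EP: L1 on
 */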
529
530 static bool
531 dhd_bus_is_rc_ep_aspm_capable(dhd_bus_t *bus)
532 {
533 uint32 rc_aspm_cap;
534 uint32 ep_aspm_cap;
535
536 /* RC ASPM capability */
537 rc_aspm_cap = dhdpcie_access_cap(bus->rc_dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
538 FALSE, FALSE, 0);
539 if (rc_aspm_cap == BCME_ERROR) {
540 DHD_ERROR(("%s RC is not ASPM capable\n", __FUNCTION__));
541 return FALSE;
542 }
543
544 /* EP ASPM capability */
545 ep_aspm_cap = dhdpcie_access_cap(bus->dev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
546 FALSE, FALSE, 0);
547 if (ep_aspm_cap == BCME_ERROR) {
548 DHD_ERROR(("%s EP is not ASPM capable\n", __FUNCTION__));
549 return FALSE;
550 }
551
552 return TRUE;
553 }
554
555 bool
556 dhd_bus_aspm_enable_rc_ep(dhd_bus_t *bus, bool enable)
557 {
558 bool ret;
559
560 if (!bus->rc_ep_aspm_cap) {
561 DHD_ERROR(("%s: NOT ASPM CAPABLE rc_ep_aspm_cap: %d\n",
562 __FUNCTION__, bus->rc_ep_aspm_cap));
563 return FALSE;
564 }
565
566 if (enable) {
567 /* Enable only L1 ASPM first RC then EP */
568 ret = dhd_bus_aspm_enable_dev(bus, bus->rc_dev, enable);
569 ret = dhd_bus_aspm_enable_dev(bus, bus->dev, enable);
570 } else {
571 /* Disable complete ASPM first EP then RC */
572 ret = dhd_bus_aspm_enable_dev(bus, bus->dev, enable);
573 ret = dhd_bus_aspm_enable_dev(bus, bus->rc_dev, enable);
574 }
575
576 return ret;
577 }
578
579 static void
580 dhd_bus_l1ss_enable_dev(dhd_bus_t *bus, struct pci_dev *dev, bool enable)
581 {
582 uint32 l1ssctrl_before;
583 uint32 l1ssctrl_after = 0;
584 uint8 l1ss_ep;
585 char *device;
586
587 device = (dev == bus->dev) ? "EP" : "RC";
588
589 /* Extended Capability Reg */
590 l1ssctrl_before = dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS,
591 PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
592 l1ss_ep = (l1ssctrl_before & PCIE_EXT_L1SS_MASK);
593
594 if (enable) {
595 if (l1ss_ep == PCIE_EXT_L1SS_ENAB) {
596 DHD_ERROR(("%s: %s already enabled, l1ssctrl: 0x%x\n",
597 __FUNCTION__, device, l1ssctrl_before));
598 return;
599 }
600 dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
601 TRUE, TRUE, (l1ssctrl_before | PCIE_EXT_L1SS_ENAB));
602 } else {
603 if (l1ss_ep == 0) {
604 DHD_ERROR(("%s: %s already disabled, l1ssctrl: 0x%x\n",
605 __FUNCTION__, device, l1ssctrl_before));
606 return;
607 }
608 dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS, PCIE_EXTCAP_L1SS_CONTROL_OFFSET,
609 TRUE, TRUE, (l1ssctrl_before & (~PCIE_EXT_L1SS_ENAB)));
610 }
611 l1ssctrl_after = dhdpcie_access_cap(dev, PCIE_EXTCAP_ID_L1SS,
612 PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
613 DHD_ERROR(("%s: %s %s, l1ssctrl_before: 0x%x l1ssctrl_after: 0x%x\n",
614 __FUNCTION__, device, (enable ? "ENABLE " : "DISABLE"),
615 l1ssctrl_before, l1ssctrl_after));
616
617 }
618
619 static bool
620 dhd_bus_is_rc_ep_l1ss_capable(dhd_bus_t *bus)
621 {
622 uint32 rc_l1ss_cap;
623 uint32 ep_l1ss_cap;
624
625 #ifdef CUSTOMER_HW_ROCKCHIP
626 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
627 if (IS_ENABLED(CONFIG_PCIEASPM_ROCKCHIP_WIFI_EXTENSION))
628 return rk_dhd_bus_is_rc_ep_l1ss_capable(bus);
629 #endif
630 #endif
631 /* RC Extended Capability */
632 rc_l1ss_cap = dhdpcie_access_cap(bus->rc_dev, PCIE_EXTCAP_ID_L1SS,
633 PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
634 if (rc_l1ss_cap == BCME_ERROR) {
635 DHD_ERROR(("%s RC is not l1ss capable\n", __FUNCTION__));
636 return FALSE;
637 }
638
639 /* EP Extended Capability */
640 ep_l1ss_cap = dhdpcie_access_cap(bus->dev, PCIE_EXTCAP_ID_L1SS,
641 PCIE_EXTCAP_L1SS_CONTROL_OFFSET, TRUE, FALSE, 0);
642 if (ep_l1ss_cap == BCME_ERROR) {
643 DHD_ERROR(("%s EP is not l1ss capable\n", __FUNCTION__));
644 return FALSE;
645 }
646
647 return TRUE;
648 }
649
650 void
651 dhd_bus_l1ss_enable_rc_ep(dhd_bus_t *bus, bool enable)
652 {
653 bool ret;
654
655 if ((!bus->rc_ep_aspm_cap) || (!bus->rc_ep_l1ss_cap)) {
656 DHD_ERROR(("%s: NOT L1SS CAPABLE rc_ep_aspm_cap: %d rc_ep_l1ss_cap: %d\n",
657 __FUNCTION__, bus->rc_ep_aspm_cap, bus->rc_ep_l1ss_cap));
658 return;
659 }
660
661 /* Disable ASPM of RC and EP */
662 ret = dhd_bus_aspm_enable_rc_ep(bus, FALSE);
663
664 if (enable) {
665 /* Enable RC then EP */
666 dhd_bus_l1ss_enable_dev(bus, bus->rc_dev, enable);
667 dhd_bus_l1ss_enable_dev(bus, bus->dev, enable);
668 } else {
669 /* Disable EP then RC */
670 dhd_bus_l1ss_enable_dev(bus, bus->dev, enable);
671 dhd_bus_l1ss_enable_dev(bus, bus->rc_dev, enable);
672 }
673
674 /* Re-enable ASPM of RC and EP only if this function disabled it */
675 if (ret == TRUE) {
676 dhd_bus_aspm_enable_rc_ep(bus, TRUE);
677 }
678 }
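/*
 * Usage sketch (illustrative only): L1SS is reprogrammed with ASPM quiesced,
 * which is the disable/program/re-enable pattern implemented above. A caller
 * simply does:
 *
 *	dhd_bus_l1ss_enable_rc_ep(bus, TRUE);	// ASPM off, L1SS RC then EP, ASPM back on
 *	dhd_bus_l1ss_enable_rc_ep(bus, FALSE);	// ASPM off, L1SS EP then RC, ASPM back on
 */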
679
680 void
681 dhd_bus_aer_config(dhd_bus_t *bus)
682 {
683 uint32 val;
684
685 DHD_ERROR(("%s: Configure AER registers for EP\n", __FUNCTION__));
686 val = dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID,
687 PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0);
688 if (val != (uint32)-1) {
689 val &= ~CORR_ERR_AE;
690 dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID,
691 PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val);
692 } else {
693 DHD_ERROR(("%s: Invalid EP's PCIE_ADV_CORR_ERR_MASK: 0x%x\n",
694 __FUNCTION__, val));
695 }
696
697 DHD_ERROR(("%s: Configure AER registers for RC\n", __FUNCTION__));
698 val = dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID,
699 PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0);
700 if (val != (uint32)-1) {
701 val &= ~CORR_ERR_AE;
702 dhdpcie_rc_access_cap(bus, PCIE_ADVERRREP_CAPID,
703 PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, TRUE, val);
704 } else {
705 DHD_ERROR(("%s: Invalid RC's PCIE_ADV_CORR_ERR_MASK: 0x%x\n",
706 __FUNCTION__, val));
707 }
708 }
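/*
 * For reference: in the AER Correctable Error Mask register a set bit
 * suppresses reporting, so clearing CORR_ERR_AE above un-masks Advisory
 * Non-Fatal errors on both EP and RC. Hedged sketch of verifying the result:
 *
 *	uint32 mask = dhdpcie_ep_access_cap(bus, PCIE_ADVERRREP_CAPID,
 *		PCIE_ADV_CORR_ERR_MASK_OFFSET, TRUE, FALSE, 0);
 *	ASSERT((mask & CORR_ERR_AE) == 0);	// advisory errors now reported
 */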
709
710 #ifdef DHD_PCIE_RUNTIMEPM
711 static int dhdpcie_pm_suspend(struct device *dev)
712 {
713 int ret = 0;
714 struct pci_dev *pdev = to_pci_dev(dev);
715 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
716 dhd_bus_t *bus = NULL;
717 unsigned long flags;
718 int msglevel = dhd_msg_level;
719
720 printf("%s: Enter\n", __FUNCTION__);
721 if (pch) {
722 bus = pch->bus;
723 }
724 if (!bus) {
725 return ret;
726 }
727
728 DHD_GENERAL_LOCK(bus->dhd, flags);
729 if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
730 DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n",
731 __FUNCTION__, bus->dhd->dhd_bus_busy_state));
732 DHD_GENERAL_UNLOCK(bus->dhd, flags);
733 return -EBUSY;
734 }
735 DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
736 DHD_GENERAL_UNLOCK(bus->dhd, flags);
737
738 dhd_msg_level |= DHD_RPM_VAL;
739 if (bus->dhd->up)
740 ret = dhdpcie_set_suspend_resume(bus, TRUE);
741
742 DHD_GENERAL_LOCK(bus->dhd, flags);
743 DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
744 dhd_os_busbusy_wake(bus->dhd);
745 dhd_msg_level = msglevel;
746 printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
747 DHD_GENERAL_UNLOCK(bus->dhd, flags);
748
749 return ret;
750
751 }
752
753 static int dhdpcie_pm_prepare(struct device *dev)
754 {
755 struct pci_dev *pdev = to_pci_dev(dev);
756 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
757 dhd_bus_t *bus = NULL;
758
759 if (!pch || !pch->bus) {
760 return 0;
761 }
762
763 bus = pch->bus;
764 DHD_DISABLE_RUNTIME_PM(bus->dhd);
765 bus->chk_pm = TRUE;
766
767 return 0;
768 }
769
770 static int dhdpcie_pm_resume(struct device *dev)
771 {
772 int ret = 0;
773 struct pci_dev *pdev = to_pci_dev(dev);
774 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
775 dhd_bus_t *bus = NULL;
776 unsigned long flags;
777 int msglevel = dhd_msg_level;
778
779 printf("%s: Enter\n", __FUNCTION__);
780 if (pch) {
781 bus = pch->bus;
782 }
783 if (!bus) {
784 return ret;
785 }
786
787 DHD_GENERAL_LOCK(bus->dhd, flags);
788 DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
789 DHD_GENERAL_UNLOCK(bus->dhd, flags);
790
791 dhd_msg_level |= DHD_RPM_VAL;
792 if (bus->dhd->up)
793 ret = dhdpcie_set_suspend_resume(bus, FALSE);
794
795 DHD_GENERAL_LOCK(bus->dhd, flags);
796 DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
797 dhd_os_busbusy_wake(bus->dhd);
798 dhd_msg_level = msglevel;
799 printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
800 DHD_GENERAL_UNLOCK(bus->dhd, flags);
801
802 return ret;
803 }
804
805 static void dhdpcie_pm_complete(struct device *dev)
806 {
807 struct pci_dev *pdev = to_pci_dev(dev);
808 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
809 dhd_bus_t *bus = NULL;
810
811 if (!pch || !pch->bus) {
812 return;
813 }
814
815 bus = pch->bus;
816 DHD_ENABLE_RUNTIME_PM(bus->dhd);
817 bus->chk_pm = FALSE;
818
819 return;
820 }
821 #else
822 static int dhdpcie_pci_suspend(struct pci_dev *pdev, pm_message_t state)
823 {
824 int ret = 0;
825 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
826 dhd_bus_t *bus = NULL;
827 unsigned long flags;
828 uint32 i = 0;
829
830 printf("%s: Enter\n", __FUNCTION__);
831 if (pch) {
832 bus = pch->bus;
833 }
834 if (!bus) {
835 return ret;
836 }
837
838 BCM_REFERENCE(state);
839
840 DHD_GENERAL_LOCK(bus->dhd, flags);
841 if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
842 DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n",
843 __FUNCTION__, bus->dhd->dhd_bus_busy_state));
844
845 DHD_GENERAL_UNLOCK(bus->dhd, flags);
846 OSL_DELAY(1000);
847 /* retry till the transaction is complete */
848 while (i < 100) {
849 OSL_DELAY(1000);
850 i++;
851
852 DHD_GENERAL_LOCK(bus->dhd, flags);
853 if (DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
854 DHD_ERROR(("%s: Bus enter IDLE!! after %d ms\n",
855 __FUNCTION__, i));
856 break;
857 }
858 if (i != 100) {
859 DHD_GENERAL_UNLOCK(bus->dhd, flags);
860 }
861 }
862 if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
863 DHD_GENERAL_UNLOCK(bus->dhd, flags);
864 DHD_ERROR(("%s: Bus not IDLE!! Failed after %d ms, "
865 "dhd_bus_busy_state = 0x%x\n",
866 __FUNCTION__, i, bus->dhd->dhd_bus_busy_state));
867 return -EBUSY;
868 }
869 }
870 DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
871 DHD_GENERAL_UNLOCK(bus->dhd, flags);
872
873 #ifdef DHD_CFG80211_SUSPEND_RESUME
874 dhd_cfg80211_suspend(bus->dhd);
875 #endif /* DHD_CFG80211_SUSPEND_RESUME */
876
877 if (!bus->dhd->dongle_reset)
878 ret = dhdpcie_set_suspend_resume(bus, TRUE);
879
880 DHD_GENERAL_LOCK(bus->dhd, flags);
881 DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
882 dhd_os_busbusy_wake(bus->dhd);
883 printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
884 DHD_GENERAL_UNLOCK(bus->dhd, flags);
885
886 return ret;
887 }
888
889 #if defined(BT_OVER_PCIE)
890 static int dhdpcie_pci_resume_early(struct pci_dev *pdev)
891 {
892 int ret = 0;
893 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
894 dhd_bus_t *bus = NULL;
895 uint32 pmcsr;
896
897 if (pch) {
898 bus = pch->bus;
899 }
900 if (!bus) {
901 return ret;
902 }
903
904 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 9))
905 /* On fc30 (linux ver 5.0.9),
906 * PMEStat of PMCSR (cfg reg) is cleared by the kernel before this callback.
907 * So we use SwPme of FunctionControl (enum reg) instead of PMEStat, without a kernel change.
908 */
909 if (bus->sih->buscorerev >= 64) {
910 uint32 ftnctrl;
911 volatile void *regsva = (volatile void *)bus->regs;
912
913 ftnctrl = pcie_corereg(bus->osh, regsva,
914 OFFSETOF(sbpcieregs_t, ftn_ctrl.control), 0, 0);
915 pmcsr = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_PMCSR, sizeof(pmcsr));
916
917 DHD_ERROR(("%s(): pmcsr is 0x%x, ftnctrl is 0x%8x \r\n",
918 __FUNCTION__, pmcsr, ftnctrl));
919 if (ftnctrl & PCIE_FTN_SWPME_MASK) {
920 DHD_ERROR(("%s(): Wakeup due to WLAN \r\n", __FUNCTION__));
921 }
922 } else
923 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 9)) */
924 {
925 pmcsr = OSL_PCI_READ_CONFIG(bus->osh, PCIE_CFG_PMCSR, sizeof(pmcsr));
926
927 DHD_ERROR(("%s(): pmcsr is 0x%x \r\n", __FUNCTION__, pmcsr));
928 if (pmcsr & PCIE_PMCSR_PMESTAT) {
929 DHD_ERROR(("%s(): Wakeup due to WLAN \r\n", __FUNCTION__));
930 }
931 }
932
933 /*
934 * TODO: Add code to take advantage of what is read from pmcsr
935 */
936
937 return ret;
938 }
939 #endif /* BT_OVER_PCIE */
940
941 static int dhdpcie_pci_resume(struct pci_dev *pdev)
942 {
943 int ret = 0;
944 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
945 dhd_bus_t *bus = NULL;
946 unsigned long flags;
947
948 printf("%s: Enter\n", __FUNCTION__);
949 if (pch) {
950 bus = pch->bus;
951 }
952 if (!bus) {
953 return ret;
954 }
955
956 DHD_GENERAL_LOCK(bus->dhd, flags);
957 DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
958 DHD_GENERAL_UNLOCK(bus->dhd, flags);
959
960 if (!bus->dhd->dongle_reset)
961 ret = dhdpcie_set_suspend_resume(bus, FALSE);
962
963 DHD_GENERAL_LOCK(bus->dhd, flags);
964 DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
965 dhd_os_busbusy_wake(bus->dhd);
966 printf("%s: Exit ret=%d\n", __FUNCTION__, ret);
967 DHD_GENERAL_UNLOCK(bus->dhd, flags);
968
969 #ifdef DHD_CFG80211_SUSPEND_RESUME
970 dhd_cfg80211_resume(bus->dhd);
971 #endif /* DHD_CFG80211_SUSPEND_RESUME */
972 return ret;
973 }
974
975 #endif /* DHD_PCIE_RUNTIMEPM */
976
977 static int
978 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
979 dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state, bool byint)
980 #else
981 dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state)
982 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
983 {
984 int ret = 0;
985
986 ASSERT(bus && !bus->dhd->dongle_reset);
987
988 #ifdef DHD_PCIE_RUNTIMEPM
989 /* if wakelock is held during suspend, return failed */
990 if (state == TRUE && dhd_os_check_wakelock_all(bus->dhd)) {
991 return -EBUSY;
992 }
993 mutex_lock(&bus->pm_lock);
994 #endif /* DHD_PCIE_RUNTIMEPM */
995 DHD_RPM(("%s: Enter state=%d\n", __FUNCTION__, state));
996
997 /* When firmware is not loaded, do only the */
998 /* PCI bus suspend/resume */
999 if (bus->dhd->busstate == DHD_BUS_DOWN) {
1000 ret = dhdpcie_pci_suspend_resume(bus, state);
1001 #ifdef DHD_PCIE_RUNTIMEPM
1002 mutex_unlock(&bus->pm_lock);
1003 #endif /* DHD_PCIE_RUNTIMEPM */
1004 return ret;
1005 }
1006 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1007 ret = dhdpcie_bus_suspend(bus, state, byint);
1008 #else
1009 ret = dhdpcie_bus_suspend(bus, state);
1010 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1011
1012 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) && defined(DHD_TCP_LIMIT_OUTPUT)
1013 if (ret == BCME_OK) {
1014 /*
1015 * net.ipv4.tcp_limit_output_bytes applies to all IPv4 sockets,
1016 * so return it to its original value when there is no traffic (suspend)
1017 */
1018 if (state == TRUE) {
1019 dhd_ctrl_tcp_limit_output_bytes(0);
1020 } else {
1021 dhd_ctrl_tcp_limit_output_bytes(1);
1022 }
1023 }
1024 #endif /* LINUX_VERSION_CODE > 4.19.0 && DHD_TCP_LIMIT_OUTPUT */
1025 DHD_RPM(("%s: Exit ret=%d\n", __FUNCTION__, ret));
1026
1027 #ifdef DHD_PCIE_RUNTIMEPM
1028 mutex_unlock(&bus->pm_lock);
1029 #endif /* DHD_PCIE_RUNTIMEPM */
1030
1031 return ret;
1032 }
1033
1034 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1035 static int dhdpcie_pm_runtime_suspend(struct device *dev)
1036 {
1037 struct pci_dev *pdev = to_pci_dev(dev);
1038 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
1039 dhd_bus_t *bus = NULL;
1040 int ret = 0;
1041
1042 if (!pch)
1043 return -EBUSY;
1044
1045 bus = pch->bus;
1046
1047 DHD_RPM(("%s Enter\n", __FUNCTION__));
1048
1049 if (atomic_read(&bus->dhd->block_bus))
1050 return -EHOSTDOWN;
1051
1052 dhd_netif_stop_queue(bus);
1053 atomic_set(&bus->dhd->block_bus, TRUE);
1054
1055 if (dhdpcie_set_suspend_resume(pdev, TRUE, TRUE)) {
1056 pm_runtime_mark_last_busy(dev);
1057 ret = -EAGAIN;
1058 }
1059
1060 atomic_set(&bus->dhd->block_bus, FALSE);
1061 dhd_bus_start_queue(bus);
1062
1063 return ret;
1064 }
1065
1066 static int dhdpcie_pm_runtime_resume(struct device *dev)
1067 {
1068 struct pci_dev *pdev = to_pci_dev(dev);
1069 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
1070 dhd_bus_t *bus = pch->bus;
1071
1072 DHD_RPM(("%s Enter\n", __FUNCTION__));
1073
1074 if (atomic_read(&bus->dhd->block_bus))
1075 return -EHOSTDOWN;
1076
1077 if (dhdpcie_set_suspend_resume(pdev, FALSE, TRUE))
1078 return -EAGAIN;
1079
1080 return 0;
1081 }
1082
1083 static int dhdpcie_pm_system_suspend_noirq(struct device *dev)
1084 {
1085 struct pci_dev *pdev = to_pci_dev(dev);
1086 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
1087 dhd_bus_t *bus = NULL;
1088 int ret;
1089
1090 DHD_RPM(("%s Enter\n", __FUNCTION__));
1091
1092 if (!pch)
1093 return -EBUSY;
1094
1095 bus = pch->bus;
1096
1097 if (atomic_read(&bus->dhd->block_bus))
1098 return -EHOSTDOWN;
1099
1100 dhd_netif_stop_queue(bus);
1101 atomic_set(&bus->dhd->block_bus, TRUE);
1102
1103 ret = dhdpcie_set_suspend_resume(pdev, TRUE, FALSE);
1104
1105 if (ret) {
1106 dhd_bus_start_queue(bus);
1107 atomic_set(&bus->dhd->block_bus, FALSE);
1108 }
1109
1110 return ret;
1111 }
1112
1113 static int dhdpcie_pm_system_resume_noirq(struct device *dev)
1114 {
1115 struct pci_dev *pdev = to_pci_dev(dev);
1116 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
1117 dhd_bus_t *bus = NULL;
1118 int ret;
1119
1120 if (!pch)
1121 return -EBUSY;
1122
1123 bus = pch->bus;
1124
1125 DHD_RPM(("%s Enter\n", __FUNCTION__));
1126
1127 ret = dhdpcie_set_suspend_resume(pdev, FALSE, FALSE);
1128
1129 atomic_set(&bus->dhd->block_bus, FALSE);
1130 dhd_bus_start_queue(bus);
1131 pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
1132
1133 return ret;
1134 }
1135 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
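/*
 * Illustrative sketch (not compiled): with DHD_PCIE_NATIVE_RUNTIMEPM the
 * runtime-PM core drives the callbacks above. A probe-time setup pairing with
 * AUTO_SUSPEND_TIMEOUT might look like this (hypothetical placement, shown
 * for orientation only):
 *
 *	pm_runtime_set_autosuspend_delay(&pdev->dev, AUTO_SUSPEND_TIMEOUT);
 *	pm_runtime_use_autosuspend(&pdev->dev);
 *	pm_runtime_allow(&pdev->dev);		// device may now runtime-suspend
 */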
1136
1137 #if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1138 extern void dhd_dpc_tasklet_kill(dhd_pub_t *dhdp);
1139 #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1140
1141 static void
1142 dhdpcie_suspend_dump_cfgregs(struct dhd_bus *bus, char *suspend_state)
1143 {
1144 DHD_RPM(("%s: BaseAddress0(0x%x)=0x%x, "
1145 "BaseAddress1(0x%x)=0x%x PCIE_CFG_PMCSR(0x%x)=0x%x "
1146 "PCI_BAR1_WIN(0x%x)=(0x%x)\n",
1147 suspend_state,
1148 PCIECFGREG_BASEADDR0,
1149 dhd_pcie_config_read(bus,
1150 PCIECFGREG_BASEADDR0, sizeof(uint32)),
1151 PCIECFGREG_BASEADDR1,
1152 dhd_pcie_config_read(bus,
1153 PCIECFGREG_BASEADDR1, sizeof(uint32)),
1154 PCIE_CFG_PMCSR,
1155 dhd_pcie_config_read(bus,
1156 PCIE_CFG_PMCSR, sizeof(uint32)),
1157 PCI_BAR1_WIN,
1158 dhd_pcie_config_read(bus,
1159 PCI_BAR1_WIN, sizeof(uint32))));
1160 }
1161
1162 static int dhdpcie_suspend_dev(struct pci_dev *dev)
1163 {
1164 int ret;
1165 dhdpcie_info_t *pch = pci_get_drvdata(dev);
1166 dhd_bus_t *bus = pch->bus;
1167
1168 #if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1169 if (bus->is_linkdown) {
1170 DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
1171 return BCME_ERROR;
1172 }
1173 #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1174 DHD_RPM(("%s: Enter\n", __FUNCTION__));
1175 #if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
1176 defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_EXYNOS2100) || \
1177 defined(CONFIG_SOC_EXYNOS1000)
1178 DHD_ERROR(("%s: Disable L1ss EP side\n", __FUNCTION__));
1179 exynos_pcie_l1ss_ctrl(0, PCIE_L1SS_CTRL_WIFI);
1180 #endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 ||
1181 * CONFIG_SOC_EXYNOS9830 || CONFIG_SOC_EXYNOS2100 ||
1182 * CONFIG_SOC_EXYNOS1000
1183 */
1184 #if defined(CONFIG_SOC_GS101)
1185 DHD_ERROR(("%s: Disable L1ss EP side\n", __FUNCTION__));
1186 exynos_pcie_rc_l1ss_ctrl(0, PCIE_L1SS_CTRL_WIFI, 1);
1187 #endif /* CONFIG_SOC_GS101 */
1188
1189 dhdpcie_suspend_dump_cfgregs(bus, "BEFORE_EP_SUSPEND");
1190 #if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1191 dhd_dpc_tasklet_kill(bus->dhd);
1192 #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1193 pci_save_state(dev);
1194 #if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1195 pch->state = pci_store_saved_state(dev);
1196 #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1197 pci_enable_wake(dev, PCI_D0, TRUE);
1198 if (pci_is_enabled(dev))
1199 pci_disable_device(dev);
1200
1201 ret = pci_set_power_state(dev, PCI_D3hot);
1202 if (ret) {
1203 DHD_ERROR(("%s: pci_set_power_state error %d\n",
1204 __FUNCTION__, ret));
1205 }
1206 #ifdef OEM_ANDROID
1207 // dev->state_saved = FALSE;
1208 #endif /* OEM_ANDROID */
1209 dhdpcie_suspend_dump_cfgregs(bus, "AFTER_EP_SUSPEND");
1210 return ret;
1211 }
1212
1213 #ifdef DHD_WAKE_STATUS
1214 int bcmpcie_get_total_wake(struct dhd_bus *bus)
1215 {
1216 dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
1217
1218 return pch->total_wake_count;
1219 }
1220
1221 int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag)
1222 {
1223 dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
1224 unsigned long flags;
1225 int ret;
1226
1227 DHD_PKT_WAKE_LOCK(&pch->pkt_wake_lock, flags);
1228
1229 ret = pch->pkt_wake;
1230 pch->total_wake_count += flag;
1231 pch->pkt_wake = flag;
1232
1233 DHD_PKT_WAKE_UNLOCK(&pch->pkt_wake_lock, flags);
1234 return ret;
1235 }
1236 #endif /* DHD_WAKE_STATUS */
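/*
 * Usage sketch (illustrative only): the RX path can attribute a packet to a
 * host wake and read-and-clear the flag in one call:
 *
 *	if (bcmpcie_set_get_wake(bus, 0))	// returns then clears pkt_wake
 *		DHD_ERROR(("wake packet, total wakes %d\n",
 *			bcmpcie_get_total_wake(bus)));
 */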
1237
1238 static int dhdpcie_resume_dev(struct pci_dev *dev)
1239 {
1240 int err = 0;
1241 dhdpcie_info_t *pch = pci_get_drvdata(dev);
1242 #if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1243 pci_load_and_free_saved_state(dev, &pch->state);
1244 #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1245 DHD_RPM(("%s: Enter\n", __FUNCTION__));
1246 #ifdef OEM_ANDROID
1247 // dev->state_saved = TRUE;
1248 #endif /* OEM_ANDROID */
1249 pci_restore_state(dev);
1250
1251 /* Restore the current BAR1 window */
1252 OSL_PCI_WRITE_CONFIG(pch->bus->osh, PCI_BAR1_WIN, 4, pch->bus->curr_bar1_win);
1253
1254 #ifdef FORCE_TPOWERON
1255 if (dhdpcie_chip_req_forced_tpoweron(pch->bus)) {
1256 dhd_bus_set_tpoweron(pch->bus, tpoweron_scale);
1257 }
1258 #endif /* FORCE_TPOWERON */
1259 err = pci_enable_device(dev);
1260 if (err) {
1261 printf("%s:pci_enable_device error %d \n", __FUNCTION__, err);
1262 goto out;
1263 }
1264 pci_set_master(dev);
1265 err = pci_set_power_state(dev, PCI_D0);
1266 if (err) {
1267 printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err);
1268 goto out;
1269 }
1270 BCM_REFERENCE(pch);
1271 dhdpcie_suspend_dump_cfgregs(pch->bus, "AFTER_EP_RESUME");
1272 #if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820) || \
1273 defined(CONFIG_SOC_EXYNOS9830) || defined(CONFIG_SOC_EXYNOS2100) || \
1274 defined(CONFIG_SOC_EXYNOS1000)
1275 DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
1276 exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI);
1277 #endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 ||
1278 * CONFIG_SOC_EXYNOS9830 || CONFIG_SOC_EXYNOS2100 ||
1279 * CONFIG_SOC_EXYNOS1000
1280 */
1281 #if defined(CONFIG_SOC_GS101)
1282 DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
1283 exynos_pcie_rc_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI, 1);
1284 #endif /* CONFIG_SOC_GS101 */
1285
1286 out:
1287 return err;
1288 }
1289
1290 static int dhdpcie_resume_host_dev(dhd_bus_t *bus)
1291 {
1292 int bcmerror = 0;
1293
1294 bcmerror = dhdpcie_start_host_dev(bus);
1295 if (bcmerror < 0) {
1296 DHD_ERROR(("%s: PCIe RC resume failed!!! (%d)\n",
1297 __FUNCTION__, bcmerror));
1298 bus->is_linkdown = 1;
1299 #ifdef SUPPORT_LINKDOWN_RECOVERY
1300 #ifdef CONFIG_ARCH_MSM
1301 bus->no_cfg_restore = 1;
1302 #endif /* CONFIG_ARCH_MSM */
1303 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1304 }
1305
1306 return bcmerror;
1307 }
1308
1309 static int dhdpcie_suspend_host_dev(dhd_bus_t *bus)
1310 {
1311 int bcmerror = 0;
1312 #ifdef CONFIG_ARCH_EXYNOS
1313 /*
1314 * XXX : SWWLAN-82173, SWWLAN-82183 WAR for SS PCIe RC
1315 * The SS PCIe RC/EP is a 1-to-1 mapping using different channels:
1316 * RC0 - LTE, RC1 - WiFi; RC0 and RC1 work independently
1317 */
1318
1319 if (bus->rc_dev) {
1320 pci_save_state(bus->rc_dev);
1321 } else {
1322 DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
1323 __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
1324 }
1325 #endif /* CONFIG_ARCH_EXYNOS */
1326 bcmerror = dhdpcie_stop_host_dev(bus);
1327 return bcmerror;
1328 }
1329
1330 int
1331 dhdpcie_set_master_and_d0_pwrstate(dhd_bus_t *bus)
1332 {
1333 int err;
1334 pci_set_master(bus->dev);
1335 err = pci_set_power_state(bus->dev, PCI_D0);
1336 if (err) {
1337 DHD_ERROR(("%s: pci_set_power_state error %d \n", __FUNCTION__, err));
1338 }
1339 return err;
1340 }
1341
1342 uint32
1343 dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset)
1344 {
1345 uint val = -1; /* Initialise to 0xffffffff */
1346 if (bus->rc_dev) {
1347 pci_read_config_dword(bus->rc_dev, offset, &val);
1348 OSL_DELAY(100);
1349 } else {
1350 DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
1351 __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
1352 }
1353 DHD_ERROR(("%s: RC %x:%x offset 0x%x val 0x%x\n",
1354 __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, offset, val));
1355 return (val);
1356 }
1357
1358 /*
1359 * Reads/writes the value of a capability register
1360 * from the given CAP_ID section of the PCI Root Port
1361 *
1362 * Arguments
1363 * @bus current dhd_bus_t pointer
1364 * @cap Capability or Extended Capability ID to get
1365 * @offset offset of Register to Read
1366 * @is_ext TRUE if @cap is given for Extended Capability
1367 * @is_write is set to TRUE to indicate write
1368 * @val value to write
1369 *
1370 * Return Value
1371 * Returns 0xffffffff on error
1372 * on write success returns BCME_OK (0)
1373 * on Read Success returns the value of register requested
1374 * Note: caller should ensure valid capability ID and Ext. Capability ID.
1375 */
1376
1377 uint32
1378 dhdpcie_access_cap(struct pci_dev *pdev, int cap, uint offset, bool is_ext, bool is_write,
1379 uint32 writeval)
1380 {
1381 int cap_ptr = 0;
1382 uint32 ret = -1;
1383 uint32 readval;
1384
1385 if (!(pdev)) {
1386 DHD_ERROR(("%s: pdev is NULL\n", __FUNCTION__));
1387 return ret;
1388 }
1389
1390 /* Find Capability offset */
1391 if (is_ext) {
1392 /* removing max EXT_CAP_ID check as
1393 * the linux kernel definition's max value is not updated yet as per spec
1394 */
1395 cap_ptr = pci_find_ext_capability(pdev, cap);
1396
1397 } else {
1398 /* removing max PCI_CAP_ID_MAX check as
1399 * previous kernel versions don't have this definition
1400 */
1401 cap_ptr = pci_find_capability(pdev, cap);
1402 }
1403
1404 /* Return if capability with given ID not found */
1405 if (cap_ptr == 0) {
1406 DHD_ERROR(("%s: PCI Cap(0x%02x) not supported.\n",
1407 __FUNCTION__, cap));
1408 return BCME_ERROR;
1409 }
1410
1411 if (is_write) {
1412 pci_write_config_dword(pdev, (cap_ptr + offset), writeval);
1413 ret = BCME_OK;
1414
1415 } else {
1416
1417 pci_read_config_dword(pdev, (cap_ptr + offset), &readval);
1418 ret = readval;
1419 }
1420
1421 return ret;
1422 }
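/*
 * Usage sketch (illustrative only): read the Link Control register of the
 * PCIe capability, then write it back with L1 ASPM enabled:
 *
 *	uint32 lc = dhdpcie_access_cap(pdev, PCIE_CAP_ID_EXP,
 *		PCIE_CAP_LINKCTRL_OFFSET, FALSE, FALSE, 0);		// read
 *	dhdpcie_access_cap(pdev, PCIE_CAP_ID_EXP, PCIE_CAP_LINKCTRL_OFFSET,
 *		FALSE, TRUE, lc | PCIE_ASPM_L1_ENAB);			// write
 */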
1423
1424 uint32
1425 dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
1426 uint32 writeval)
1427 {
1428 if (!(bus->rc_dev)) {
1429 DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
1430 __FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
1431 return BCME_ERROR;
1432 }
1433
1434 return dhdpcie_access_cap(bus->rc_dev, cap, offset, is_ext, is_write, writeval);
1435 }
1436
1437 uint32
1438 dhdpcie_ep_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
1439 uint32 writeval)
1440 {
1441 if (!(bus->dev)) {
1442 DHD_ERROR(("%s: EP handle is NULL\n", __FUNCTION__));
1443 return BCME_ERROR;
1444 }
1445
1446 return dhdpcie_access_cap(bus->dev, cap, offset, is_ext, is_write, writeval);
1447 }
1448
1449 /* API wrapper to read the Root Port link capability
1450 * Returns 2 = GEN2, 1 = GEN1, BCME_ERROR if linkcap is not found
1451 */
1452
1453 uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus)
1454 {
1455 uint32 linkcap = -1;
1456 linkcap = dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP,
1457 PCIE_CAP_LINKCAP_OFFSET, FALSE, FALSE, 0);
1458 linkcap &= PCIE_CAP_LINKCAP_LNKSPEED_MASK;
1459 return linkcap;
1460 }
1461
1462 static void dhdpcie_config_save_restore_coherent(dhd_bus_t *bus, bool state)
1463 {
1464 if (bus->coreid == ARMCA7_CORE_ID) {
1465 if (state) {
1466 /* Sleep */
1467 bus->coherent_state = dhdpcie_bus_cfg_read_dword(bus,
1468 PCIE_CFG_SUBSYSTEM_CONTROL, 4) & PCIE_BARCOHERENTACCEN_MASK;
1469 } else {
1470 uint32 val = (dhdpcie_bus_cfg_read_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL,
1471 4) & ~PCIE_BARCOHERENTACCEN_MASK) | bus->coherent_state;
1472 dhdpcie_bus_cfg_write_dword(bus, PCIE_CFG_SUBSYSTEM_CONTROL, 4, val);
1473 }
1474 }
1475 }
1476
1477 int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state)
1478 {
1479 int rc;
1480
1481 struct pci_dev *dev = bus->dev;
1482
1483 if (state) {
1484 dhdpcie_config_save_restore_coherent(bus, state);
1485 #if !defined(BCMPCIE_OOB_HOST_WAKE) && !defined(PCIE_OOB)
1486 dhdpcie_pme_active(bus->osh, state);
1487 #endif /* !BCMPCIE_OOB_HOST_WAKE && !PCIE_OOB */
1488 rc = dhdpcie_suspend_dev(dev);
1489 if (!rc) {
1490 dhdpcie_suspend_host_dev(bus);
1491 }
1492 } else {
1493 rc = dhdpcie_resume_host_dev(bus);
1494 if (!rc) {
1495 rc = dhdpcie_resume_dev(dev);
1496 if (PCIECTO_ENAB(bus)) {
1497 /* reinit CTO configuration
1498 * because cfg space got reset at D3 (PERST)
1499 */
1500 dhdpcie_cto_cfg_init(bus, TRUE);
1501 }
1502 if (PCIE_ENUM_RESET_WAR_ENAB(bus->sih->buscorerev)) {
1503 dhdpcie_ssreset_dis_enum_rst(bus);
1504 }
1505 #if !defined(BCMPCIE_OOB_HOST_WAKE) && !defined(PCIE_OOB)
1506 dhdpcie_pme_active(bus->osh, state);
1507 #endif /* !BCMPCIE_OOB_HOST_WAKE && !PCIE_OOB */
1508 }
1509 dhdpcie_config_save_restore_coherent(bus, state);
1510 #if defined(OEM_ANDROID)
1511 #if defined(DHD_HANG_SEND_UP_TEST)
1512 if (bus->is_linkdown ||
1513 bus->dhd->req_hang_type == HANG_REASON_PCIE_RC_LINK_UP_FAIL)
1514 #else /* DHD_HANG_SEND_UP_TEST */
1515 if (bus->is_linkdown)
1516 #endif /* DHD_HANG_SEND_UP_TEST */
1517 {
1518 bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL;
1519 dhd_os_send_hang_message(bus->dhd);
1520 }
1521 #endif /* OEM_ANDROID */
1522 }
1523 return rc;
1524 }
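/*
 * For reference: the ordering above is deliberately asymmetric: suspend puts
 * the EP into D3hot before stopping the RC, while resume brings the RC back
 * first so that config cycles to the EP can complete. Flow sketch:
 *
 *	suspend: dhdpcie_suspend_dev(EP)     -> dhdpcie_suspend_host_dev(RC)
 *	resume:  dhdpcie_resume_host_dev(RC) -> dhdpcie_resume_dev(EP)
 */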
1525
1526 static int dhdpcie_device_scan(struct device *dev, void *data)
1527 {
1528 struct pci_dev *pcidev;
1529 int *cnt = data;
1530
1531 GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
1532 pcidev = container_of(dev, struct pci_dev, dev);
1533 GCC_DIAGNOSTIC_POP();
1534
1535 if (pcidev->vendor != 0x14e4)
1536 return 0;
1537
1538 DHD_INFO(("Found Broadcom PCI device 0x%04x\n", pcidev->device));
1539 *cnt += 1;
1540 if (pcidev->driver && strcmp(pcidev->driver->name, dhdpcie_driver.name))
1541 DHD_ERROR(("Broadcom PCI device 0x%04x is already claimed by driver %s\n",
1542 pcidev->device, pcidev->driver->name));
1543
1544 return 0;
1545 }
1546
1547 int
1548 dhdpcie_bus_register(void)
1549 {
1550 int error = 0;
1551
1552 if (!(error = pci_register_driver(&dhdpcie_driver))) {
1553 bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error, dhdpcie_device_scan);
1554 if (!error) {
1555 DHD_ERROR(("No Broadcom PCI device enumerated!\n"));
1556 #ifdef DHD_PRELOAD
1557 return 0;
1558 #endif
1559 } else if (!dhdpcie_init_succeeded) {
1560 DHD_ERROR(("%s: dhdpcie initialize failed.\n", __FUNCTION__));
1561 } else {
1562 return 0;
1563 }
1564
1565 pci_unregister_driver(&dhdpcie_driver);
1566 error = BCME_ERROR;
1567 }
1568
1569 return error;
1570 }
1571
1572 void
1573 dhdpcie_bus_unregister(void)
1574 {
1575 pci_unregister_driver(&dhdpcie_driver);
1576 }
1577
1578 int __devinit
1579 dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1580 {
1581 int err = 0;
1582 DHD_MUTEX_LOCK();
1583
1584 if (dhdpcie_chipmatch (pdev->vendor, pdev->device)) {
1585 DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__));
1586 err = -ENODEV;
1587 goto exit;
1588 }
1589
1590 printf("PCI_PROBE: bus %X, slot %X,vendor %X, device %X"
1591 "(good PCI location)\n", pdev->bus->number,
1592 PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device);
1593
1594 if (dhdpcie_init_succeeded == TRUE) {
1595 DHD_ERROR(("%s(): === Driver Already attached to a BRCM device === \r\n",
1596 __FUNCTION__));
1597 err = -ENODEV;
1598 goto exit;
1599 }
1600
1601 if (dhdpcie_init (pdev)) {
1602 DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__));
1603 err = -ENODEV;
1604 goto exit;
1605 }
1606
1607 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1608 /*
1609 Since the MSM PCIe RC dev usage count is already incremented by 2 even
1610 before dhdpcie_pci_probe() is called, we inevitably have to call
1611 pm_runtime_put_noidle() twice to make the count start from zero.
1612 */
1613
1614 pm_runtime_put_noidle(&pdev->dev);
1615 pm_runtime_put_noidle(&pdev->dev);
1616 pm_runtime_set_suspended(&pdev->dev);
1617 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1618
1619 #ifdef BCMPCIE_DISABLE_ASYNC_SUSPEND
1620 /* disable async suspend */
1621 device_disable_async_suspend(&pdev->dev);
1622 #endif /* BCMPCIE_DISABLE_ASYNC_SUSPEND */
1623
1624 DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__));
1625 exit:
1626 DHD_MUTEX_UNLOCK();
1627 return err;
1628 }
1629
1630 int
1631 dhdpcie_detach(dhdpcie_info_t *pch)
1632 {
1633 if (pch) {
1634 #if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1635 if (!dhd_download_fw_on_driverload) {
1636 pci_load_and_free_saved_state(pch->dev, &pch->default_state);
1637 }
1638 #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1639 MFREE(pch->osh, pch, sizeof(dhdpcie_info_t));
1640 }
1641 return 0;
1642 }
1643
1644 void __devexit
1645 dhdpcie_pci_remove(struct pci_dev *pdev)
1646 {
1647 osl_t *osh = NULL;
1648 dhdpcie_info_t *pch = NULL;
1649 dhd_bus_t *bus = NULL;
1650
1651 DHD_TRACE(("%s Enter\n", __FUNCTION__));
1652
1653 DHD_MUTEX_LOCK();
1654
1655 pch = pci_get_drvdata(pdev);
1656 bus = pch->bus;
1657 osh = pch->osh;
1658
1659 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1660 pm_runtime_get_noresume(&pdev->dev);
1661 pm_runtime_get_noresume(&pdev->dev);
1662 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1663
1664 if (bus) {
1665 #ifdef SUPPORT_LINKDOWN_RECOVERY
1666 #ifdef CONFIG_ARCH_MSM
1667 msm_pcie_deregister_event(&bus->pcie_event);
1668 #endif /* CONFIG_ARCH_MSM */
1669 #ifdef CONFIG_ARCH_EXYNOS
1670 exynos_pcie_deregister_event(&bus->pcie_event);
1671 #endif /* CONFIG_ARCH_EXYNOS */
1672 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1673
1674 bus->rc_dev = NULL;
1675
1676 dhdpcie_bus_release(bus);
1677 }
1678
1679 /*
1680 * For a module type driver,
1681 * the configuration space needs to be backed up before rmmod.
1682 * Since the original backed-up configuration space won't be restored if state_saved = false,
1683 * this backs up the configuration space again and sets state_saved = true
1684 */
1685 pci_save_state(pdev);
1686
1687 if (pci_is_enabled(pdev))
1688 pci_disable_device(pdev);
1689 #ifdef BCMPCIE_OOB_HOST_WAKE
1690 /* pcie os info detach */
1691 MFREE(osh, pch->os_cxt, sizeof(dhdpcie_os_info_t));
1692 #endif /* BCMPCIE_OOB_HOST_WAKE */
1693 #ifdef USE_SMMU_ARCH_MSM
1694 /* smmu info detach */
1695 dhdpcie_smmu_remove(pdev, pch->smmu_cxt);
1696 MFREE(osh, pch->smmu_cxt, sizeof(dhdpcie_smmu_info_t));
1697 #endif /* USE_SMMU_ARCH_MSM */
1698 /* pcie info detach */
1699 dhdpcie_detach(pch);
1700 /* osl detach */
1701 osl_detach(osh);
1702
1703 #if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \
1704 defined(CONFIG_ARCH_APQ8084)
1705 brcm_pcie_wake.wake_irq = NULL;
1706 brcm_pcie_wake.data = NULL;
1707 #endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMR_HW2 && CONFIG_ARCH_APQ8084 */
1708
1709 dhdpcie_init_succeeded = FALSE;
1710
1711 DHD_MUTEX_UNLOCK();
1712
1713 DHD_TRACE(("%s Exit\n", __FUNCTION__));
1714
1715 return;
1716 }
1717
1718 /* Enable Linux Msi */
1719 int
1720 dhdpcie_enable_msi(struct pci_dev *pdev, unsigned int min_vecs, unsigned int max_vecs)
1721 {
1722 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
1723 return pci_alloc_irq_vectors(pdev, min_vecs, max_vecs, PCI_IRQ_MSI);
1724 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
1725 return pci_enable_msi_range(pdev, min_vecs, max_vecs);
1726 #else
1727 return pci_enable_msi_block(pdev, max_vecs);
1728 #endif
1729 }
1730
1731 /* Disable Linux Msi */
1732 void
1733 dhdpcie_disable_msi(struct pci_dev *pdev)
1734 {
1735 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
1736 pci_free_irq_vectors(pdev);
1737 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0))
1738 pci_disable_msi(pdev);
1739 #else
1740 pci_disable_msi(pdev);
1741 #endif
1742 return;
1743 }
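/*
 * For reference: the two wrappers above hide the kernel's evolving MSI API
 * (pci_alloc_irq_vectors() since 4.8, pci_enable_msi_range() since 3.16,
 * pci_enable_msi_block() before that). Hedged caller-side sketch, mirroring
 * dhdpcie_request_irq() below:
 *
 *	if (dhdpcie_enable_msi(pdev, 1, 1) < 0) {
 *		dhdpcie_disable_msi(pdev);
 *		bus->d2h_intr_method = PCIE_INTX;	// fall back to legacy INTx
 *	}
 */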
1744
1745 /* Request Linux irq */
1746 int
1747 dhdpcie_request_irq(dhdpcie_info_t *dhdpcie_info)
1748 {
1749 dhd_bus_t *bus = dhdpcie_info->bus;
1750 struct pci_dev *pdev = dhdpcie_info->bus->dev;
1751 int host_irq_disabled, err = 0;
1752
1753 if (!bus->irq_registered) {
1754 snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname),
1755 "dhdpcie:%s", pci_name(pdev));
1756
1757 if (bus->d2h_intr_method == PCIE_MSI) {
1758 if (dhdpcie_enable_msi(pdev, 1, 1) < 0) {
1759 DHD_ERROR(("%s: dhdpcie_enable_msi() failed\n", __FUNCTION__));
1760 dhdpcie_disable_msi(pdev);
1761 bus->d2h_intr_method = PCIE_INTX;
1762 }
1763 }
1764
1765 if (bus->d2h_intr_method == PCIE_MSI)
1766 printf("%s: MSI enabled, irq=%d\n", __FUNCTION__, pdev->irq);
1767 else
1768 printf("%s: INTx enabled, irq=%d\n", __FUNCTION__, pdev->irq);
1769
1770 err = request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED,
1771 dhdpcie_info->pciname, bus);
1772 if (err < 0) {
1773 DHD_ERROR(("%s: request_irq() failed with %d\n", __FUNCTION__, err));
1774 if (bus->d2h_intr_method == PCIE_MSI) {
1775 dhdpcie_disable_msi(pdev);
1776 }
1777 return -1;
1778 }
1779 else {
1780 bus->irq_registered = TRUE;
1781 }
1782 } else {
1783 DHD_ERROR(("%s: PCI IRQ is already registered\n", __FUNCTION__));
1784 }
1785
1786 host_irq_disabled = dhdpcie_irq_disabled(bus);
1787 if (host_irq_disabled) {
1788 DHD_ERROR(("%s: PCIe IRQ was disabled (depth %d), so enable it again\n",
1789 __FUNCTION__, host_irq_disabled));
1790 dhdpcie_enable_irq(bus);
1791 }
1792
1793 DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname));
1794
1795 return 0; /* SUCCESS */
1796 }
1797
1798 /**
1799 * dhdpcie_get_pcieirq - return pcie irq number to linux-dhd
1800 */
1801 int
1802 dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq)
1803 {
1804 struct pci_dev *pdev = bus->dev;
1805
1806 if (!pdev) {
1807 DHD_ERROR(("%s : bus->dev is NULL\n", __FUNCTION__));
1808 return -ENODEV;
1809 }
1810
1811 *irq = pdev->irq;
1812
1813 return 0; /* SUCCESS */
1814 }
1815
1816 #ifdef CONFIG_PHYS_ADDR_T_64BIT
1817 #define PRINTF_RESOURCE "0x%016llx"
1818 #else
1819 #define PRINTF_RESOURCE "0x%08x"
1820 #endif
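/* phys_addr_t is 64 bits wide when CONFIG_PHYS_ADDR_T_64BIT is set, so a
 * format string matching the width of the printed BAR addresses is chosen.
 */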
1821
1822 #ifdef EXYNOS_PCIE_MODULE_PATCH
1823 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1824 extern struct pci_saved_state *bcm_pcie_default_state;
1825 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1826 #endif /* EXYNOS_PCIE_MODULE_PATCH */
1827
1828 /*
1829
1830 Name: dhdpcie_get_resource
1831
1832 Parameters:
1833
1834 1: dhdpcie_info_t *dhdpcie_info -- Linux PCIe info structure holding the
1835 pci_dev handle and receiving the mapped BAR resources
1836
1837 Return value:
1838
1839 int - Status (0 on success, -1 on failure)
1840
1841 Description:
1842 Access PCI configuration space, retrieve the PCI-allocated resources, and update the resource structure.
1843
1844 */
1845 int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info)
1846 {
1847 phys_addr_t bar0_addr, bar1_addr;
1848 ulong bar1_size;
1849 struct pci_dev *pdev = NULL;
1850 pdev = dhdpcie_info->dev;
1851 #ifdef EXYNOS_PCIE_MODULE_PATCH
1852 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1853 if (bcm_pcie_default_state) {
1854 pci_load_saved_state(pdev, bcm_pcie_default_state);
1855 pci_restore_state(pdev);
1856 }
1857 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1858 #endif /* EXYNOS_PCIE_MODULE_PATCH */
1859
1860 /*
1861 * For a built-in driver,
1862 * the configuration backup can't be restored at first load time because state_saved == false.
1863 * For a module-type driver,
1864 * the BAR0/BAR1 addresses couldn't be remapped at second load
1865 * without restoring the configuration backup,
1866 * which remains in the pci_dev because DHD didn't remove the device from the bus.
1867 * pci_restore_state() restores the proper BAR0/BAR1 addresses.
1868 */
1869 pci_restore_state(pdev);
1870
1871 do {
1872 if (pci_enable_device(pdev)) {
1873 printf("%s: Cannot enable PCI device\n", __FUNCTION__);
1874 break;
1875 }
1876 pci_set_master(pdev);
1877 bar0_addr = pci_resource_start(pdev, 0); /* Bar-0 mapped address */
1878 bar1_addr = pci_resource_start(pdev, 2); /* Bar-1 mapped address */
1879
1880 /* read Bar-1 mapped memory range */
1881 bar1_size = pci_resource_len(pdev, 2);
1882
1883 if ((bar1_size == 0) || (bar1_addr == 0)) {
1884 printf("%s: BAR1 not enabled for this device: size(%ld),"
1885 " addr(0x"PRINTF_RESOURCE")\n",
1886 __FUNCTION__, bar1_size, bar1_addr);
1887 goto err;
1888 }
1889
1890 dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
1891 dhdpcie_info->bar1_size =
1892 (bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
1893 dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->bar1_size);
1894
1895 if (!dhdpcie_info->regs || !dhdpcie_info->tcm) {
1896 DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__));
1897 break;
1898 }
1899 #ifdef EXYNOS_PCIE_MODULE_PATCH
1900 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1901 if (bcm_pcie_default_state == NULL) {
1902 pci_save_state(pdev);
1903 bcm_pcie_default_state = pci_store_saved_state(pdev);
1904 }
1905 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1906 #endif /* EXYNOS_PCIE_MODULE_PATCH */
1907
1908 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1909 /* Back up the PCIe configuration space so that the Wi-Fi on/off
1910 * process can use it in the case of a built-in driver
1911 */
1912 pci_save_state(pdev);
1913 dhdpcie_info->default_state = pci_store_saved_state(pdev);
1914
1915 if (dhdpcie_info->default_state == NULL) {
1916 DHD_ERROR(("%s pci_store_saved_state returns NULL\n",
1917 __FUNCTION__));
1918 REG_UNMAP(dhdpcie_info->regs);
1919 REG_UNMAP(dhdpcie_info->tcm);
1920 pci_disable_device(pdev);
1921 break;
1922 }
1923 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1924
1925 DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
1926 __FUNCTION__, dhdpcie_info->regs, bar0_addr));
1927 DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
1928 __FUNCTION__, dhdpcie_info->tcm, bar1_addr));
1929
1930 return 0; /* SUCCESS */
1931 } while (0);
1932 err:
1933 return -1; /* FAILURE */
1934 }
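/*
 * Mapping summary (as implemented above): BAR0 is mapped as the dongle
 * register window (DONGLE_REG_MAP_SIZE) and BAR1 (PCI resource index 2) as
 * the TCM window, sized at least DONGLE_TCM_MAP_SIZE. dhdpcie_free_resource()
 * below REG_UNMAP()s both windows on teardown.
 */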
1935
1936 int dhdpcie_scan_resource(dhdpcie_info_t *dhdpcie_info)
1937 {
1938
1939 DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
1940
1941 do {
1942 /* define it here only!! */
1943 if (dhdpcie_get_resource (dhdpcie_info)) {
1944 DHD_ERROR(("%s: Failed to get PCI resources\n", __FUNCTION__));
1945 break;
1946 }
1947 DHD_TRACE(("%s:Exit - SUCCESS \n",
1948 __FUNCTION__));
1949
1950 return 0; /* SUCCESS */
1951
1952 } while (0);
1953
1954 DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
1955
1956 return -1; /* FAILURE */
1957
1958 }
1959
1960 void dhdpcie_dump_resource(dhd_bus_t *bus)
1961 {
1962 dhdpcie_info_t *pch;
1963
1964 if (bus == NULL) {
1965 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
1966 return;
1967 }
1968
1969 if (bus->dev == NULL) {
1970 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
1971 return;
1972 }
1973
1974 pch = pci_get_drvdata(bus->dev);
1975 if (pch == NULL) {
1976 DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
1977 return;
1978 }
1979
1980 /* BAR0 */
1981 DHD_RPM(("%s: BAR0(VA): 0x%pK, BAR0(PA): "PRINTF_RESOURCE", SIZE: %d\n",
1982 __FUNCTION__, pch->regs, pci_resource_start(bus->dev, 0),
1983 DONGLE_REG_MAP_SIZE));
1984
1985 /* BAR1 */
1986 DHD_RPM(("%s: BAR1(VA): 0x%pK, BAR1(PA): "PRINTF_RESOURCE", SIZE: %lu\n",
1987 __FUNCTION__, pch->tcm, pci_resource_start(bus->dev, 2),
1988 (ulong)pch->bar1_size));
1989 }
1990
1991 #ifdef SUPPORT_LINKDOWN_RECOVERY
1992 #if defined(CONFIG_ARCH_MSM) || defined(CONFIG_ARCH_EXYNOS)
1993 void dhdpcie_linkdown_cb(struct_pcie_notify *noti)
1994 {
1995 struct pci_dev *pdev = (struct pci_dev *)noti->user;
1996 dhdpcie_info_t *pch = NULL;
1997
1998 if (pdev) {
1999 pch = pci_get_drvdata(pdev);
2000 if (pch) {
2001 dhd_bus_t *bus = pch->bus;
2002 if (bus) {
2003 dhd_pub_t *dhd = bus->dhd;
2004 if (dhd) {
2005 #ifdef CONFIG_ARCH_MSM
2006 DHD_ERROR(("%s: Set no_cfg_restore flag\n",
2007 __FUNCTION__));
2008 bus->no_cfg_restore = 1;
2009 #endif /* CONFIG_ARCH_MSM */
2010 #ifdef DHD_SSSR_DUMP
2011 if (dhd->fis_triggered) {
2012 DHD_ERROR(("%s: PCIe linkdown due to FIS, Ignore\n",
2013 __FUNCTION__));
2014 } else
2015 #endif /* DHD_SSSR_DUMP */
2016 {
2017 DHD_ERROR(("%s: Event HANG send up "
2018 "due to PCIe linkdown\n",
2019 __FUNCTION__));
2020 bus->is_linkdown = 1;
2021 dhd->hang_reason =
2022 HANG_REASON_PCIE_LINK_DOWN_RC_DETECT;
2023 dhd_os_send_hang_message(dhd);
2024 }
2025 }
2026 }
2027 }
2028 }
2029
2030 }
2031 #endif /* CONFIG_ARCH_MSM || CONFIG_ARCH_EXYNOS */
2032 #endif /* SUPPORT_LINKDOWN_RECOVERY */
2033
2034 int dhdpcie_init(struct pci_dev *pdev)
2035 {
2036
2037 osl_t *osh = NULL;
2038 dhd_bus_t *bus = NULL;
2039 dhdpcie_info_t *dhdpcie_info = NULL;
2040 wifi_adapter_info_t *adapter = NULL;
2041 #ifdef BCMPCIE_OOB_HOST_WAKE
2042 dhdpcie_os_info_t *dhdpcie_osinfo = NULL;
2043 #endif /* BCMPCIE_OOB_HOST_WAKE */
2044 #ifdef USE_SMMU_ARCH_MSM
2045 dhdpcie_smmu_info_t *dhdpcie_smmu_info = NULL;
2046 #endif /* USE_SMMU_ARCH_MSM */
2047 int ret = 0;
2048
2049 do {
2050 /* osl attach */
2051 if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
2052 DHD_ERROR(("%s: osl_attach failed\n", __FUNCTION__));
2053 break;
2054 }
2055
2056 /* initialize static buffer */
2057 adapter = dhd_wifi_platform_get_adapter(PCI_BUS, pdev->bus->number,
2058 PCI_SLOT(pdev->devfn));
2059 if (adapter != NULL) {
2060 DHD_ERROR(("%s: found adapter info '%s'\n", __FUNCTION__, adapter->name));
2061 adapter->bus_type = PCI_BUS;
2062 adapter->bus_num = pdev->bus->number;
2063 adapter->slot_num = PCI_SLOT(pdev->devfn);
2064 adapter->pci_dev = pdev;
2065 } else {
2066 DHD_ERROR(("%s: can't find adapter info for this chip\n", __FUNCTION__));
2067 #ifdef ADAPTER_IDX
2068 break;
2069 #endif
2070 }
2071 osl_static_mem_init(osh, adapter);
2072
2073 /* allocate Linux-specific PCIe structure here */
2074 if (!(dhdpcie_info = MALLOC(osh, sizeof(dhdpcie_info_t)))) {
2075 DHD_ERROR(("%s: MALLOC of dhdpcie_info_t failed\n", __FUNCTION__));
2076 break;
2077 }
2078 bzero(dhdpcie_info, sizeof(dhdpcie_info_t));
2079 dhdpcie_info->osh = osh;
2080 dhdpcie_info->dev = pdev;
2081
2082 #ifdef BCMPCIE_OOB_HOST_WAKE
2083 /* allocate OS-specific structure */
2084 dhdpcie_osinfo = MALLOC(osh, sizeof(dhdpcie_os_info_t));
2085 if (dhdpcie_osinfo == NULL) {
2086 DHD_ERROR(("%s: MALLOC of dhdpcie_os_info_t failed\n",
2087 __FUNCTION__));
2088 break;
2089 }
2090 bzero(dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
2091 dhdpcie_info->os_cxt = (void *)dhdpcie_osinfo;
2092
2093 /* Initialize host wake IRQ */
2094 spin_lock_init(&dhdpcie_osinfo->oob_irq_spinlock);
2095 /* Get customer-specific host wake IRQ parameters: the IRQ number and IRQ flags */
2096 dhdpcie_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter,
2097 &dhdpcie_osinfo->oob_irq_flags);
2098 if (dhdpcie_osinfo->oob_irq_num < 0) {
2099 DHD_ERROR(("%s: Host OOB irq is not defined\n", __FUNCTION__));
2100 }
2101 #endif /* BCMPCIE_OOB_HOST_WAKE */
2102
2103 #ifdef USE_SMMU_ARCH_MSM
2104 /* allocate private structure for using SMMU */
2105 dhdpcie_smmu_info = MALLOC(osh, sizeof(dhdpcie_smmu_info_t));
2106 if (dhdpcie_smmu_info == NULL) {
2107 DHD_ERROR(("%s: MALLOC of dhdpcie_smmu_info_t failed\n",
2108 __FUNCTION__));
2109 break;
2110 }
2111 bzero(dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t));
2112 dhdpcie_info->smmu_cxt = (void *)dhdpcie_smmu_info;
2113
2114 /* Initialize smmu structure */
2115 if (dhdpcie_smmu_init(pdev, dhdpcie_info->smmu_cxt) < 0) {
2116 DHD_ERROR(("%s: Failed to initialize SMMU\n",
2117 __FUNCTION__));
2118 break;
2119 }
2120 #endif /* USE_SMMU_ARCH_MSM */
2121
2122 #ifdef DHD_WAKE_STATUS
2123 /* Initialize pkt_wake_lock */
2124 spin_lock_init(&dhdpcie_info->pkt_wake_lock);
2125 #endif /* DHD_WAKE_STATUS */
2126
2127 /* Find the PCI resources, verify the vendor and device ID,
2128 * map the BAR regions and IRQ, and update the structures */
2129 if (dhdpcie_scan_resource(dhdpcie_info)) {
2130 DHD_ERROR(("%s: dhdpcie_scan_resource failed\n", __FUNCTION__));
2131
2132 break;
2133 }
2134
2135 /* Bus initialization */
2136 ret = dhdpcie_bus_attach(osh, &bus, dhdpcie_info->regs, dhdpcie_info->tcm, pdev, adapter);
2137 if (ret != BCME_OK) {
2138 DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__));
2139 break;
2140 }
2141
2142 dhdpcie_info->bus = bus;
2143 bus->bar1_size = dhdpcie_info->bar1_size;
2144 bus->is_linkdown = 0;
2145 bus->no_bus_init = FALSE;
2146 bus->cto_triggered = 0;
2147
2148 bus->rc_dev = NULL;
2149
2150 /* Get RC Device Handle */
2151 if (bus->dev->bus) {
2152 /* the 'self' member of struct pci_bus is the bridge device as seen by the parent */
2153 bus->rc_dev = bus->dev->bus->self;
2154 if (bus->rc_dev)
2155 DHD_ERROR(("%s: rc_dev from dev->bus->self (%x:%x) is %pK\n", __FUNCTION__,
2156 bus->rc_dev->vendor, bus->rc_dev->device, bus->rc_dev));
2157 else
2158 DHD_ERROR(("%s: bus->dev->bus->self is NULL\n", __FUNCTION__));
2159 } else {
2160 DHD_ERROR(("%s: unable to get rc_dev as dev->bus is NULL\n", __FUNCTION__));
2161 }
2162
2163 /* if rc_dev is still NULL, try to get from vendor/device IDs */
2164 if (bus->rc_dev == NULL) {
2165 bus->rc_dev = pci_get_device(PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, NULL);
2166 DHD_ERROR(("%s: rc_dev from pci_get_device (%x:%x) is %p\n", __FUNCTION__,
2167 PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, bus->rc_dev));
2168 }
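/* Note: pci_get_device() returns a reference-counted pci_dev. The assumption
 * here is that the reference is held for the lifetime of the driver rather
 * than being dropped with pci_dev_put().
 */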
2169
2170 bus->rc_ep_aspm_cap = dhd_bus_is_rc_ep_aspm_capable(bus);
2171 bus->rc_ep_l1ss_cap = dhd_bus_is_rc_ep_l1ss_capable(bus);
2172 DHD_ERROR(("%s: rc_ep_aspm_cap: %d rc_ep_l1ss_cap: %d\n",
2173 __FUNCTION__, bus->rc_ep_aspm_cap, bus->rc_ep_l1ss_cap));
2174
2175 #ifdef FORCE_TPOWERON
2176 if (dhdpcie_chip_req_forced_tpoweron(bus)) {
2177 dhd_bus_set_tpoweron(bus, tpoweron_scale);
2178 }
2179 #endif /* FORCE_TPOWERON */
2180
2181 #if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \
2182 defined(CONFIG_ARCH_APQ8084)
2183 brcm_pcie_wake.wake_irq = wlan_oob_irq;
2184 brcm_pcie_wake.data = bus;
2185 #endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMER_HW2 && CONFIG_ARCH_APQ8084 */
2186
2187 #ifdef DONGLE_ENABLE_ISOLATION
2188 bus->dhd->dongle_isolation = TRUE;
2189 #endif /* DONGLE_ENABLE_ISOLATION */
2190 #ifdef SUPPORT_LINKDOWN_RECOVERY
2191 #ifdef CONFIG_ARCH_MSM
2192 bus->pcie_event.events = MSM_PCIE_EVENT_LINKDOWN;
2193 bus->pcie_event.user = pdev;
2194 bus->pcie_event.mode = MSM_PCIE_TRIGGER_CALLBACK;
2195 bus->pcie_event.callback = dhdpcie_linkdown_cb;
2196 bus->pcie_event.options = MSM_PCIE_CONFIG_NO_RECOVERY;
2197 msm_pcie_register_event(&bus->pcie_event);
2198 bus->no_cfg_restore = FALSE;
2199 #endif /* CONFIG_ARCH_MSM */
2200 #ifdef CONFIG_ARCH_EXYNOS
2201 bus->pcie_event.events = EXYNOS_PCIE_EVENT_LINKDOWN;
2202 bus->pcie_event.user = pdev;
2203 bus->pcie_event.mode = EXYNOS_PCIE_TRIGGER_CALLBACK;
2204 bus->pcie_event.callback = dhdpcie_linkdown_cb;
2205 exynos_pcie_register_event(&bus->pcie_event);
2206 #endif /* CONFIG_ARCH_EXYNOS */
2207 bus->read_shm_fail = FALSE;
2208 #endif /* SUPPORT_LINKDOWN_RECOVERY */
2209
2210 if (bus->intr) {
2211 /* Register interrupt callback, but mask it (not operational yet). */
2212 DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
2213 bus->intr_enabled = FALSE;
2214 dhdpcie_bus_intr_disable(bus);
2215
2216 if (dhdpcie_request_irq(dhdpcie_info)) {
2217 DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
2218 break;
2219 }
2220 } else {
2221 bus->pollrate = 1;
2222 DHD_INFO(("%s: PCIe interrupt function is NOT registered "
2223 "due to polling mode\n", __FUNCTION__));
2224 }
2225
2226 #if defined(BCM_REQUEST_FW)
2227 if (dhd_bus_download_firmware(bus, osh, NULL, NULL) < 0) {
2228 DHD_ERROR(("%s: failed to download firmware\n", __FUNCTION__));
2229 }
2230 bus->nv_path = NULL;
2231 bus->fw_path = NULL;
2232 #endif /* BCM_REQUEST_FW */
2233
2234 /* set private data for pci_dev */
2235 pci_set_drvdata(pdev, dhdpcie_info);
2236
2237 /* Ensure BAR1 switch feature enable if needed before FW download */
2238 dhdpcie_bar1_window_switch_enab(bus);
2239
2240 #if defined(BCMDHD_MODULAR) && defined(INSMOD_FW_LOAD)
2241 if (1)
2242 #else
2243 if (dhd_download_fw_on_driverload)
2244 #endif
2245 {
2246 if (dhd_bus_start(bus->dhd)) {
2247 DHD_ERROR(("%s: dhd_bus_start() failed\n", __FUNCTION__));
2248 if (!allow_delay_fwdl)
2249 break;
2250 }
2251 } else {
2252 /* Set a random MAC address at boot time */
2253 get_random_bytes(&bus->dhd->mac.octet[3], 3);
2254 /* Adding BRCM OUI */
2255 bus->dhd->mac.octet[0] = 0;
2256 bus->dhd->mac.octet[1] = 0x90;
2257 bus->dhd->mac.octet[2] = 0x4C;
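/* The resulting address is 00:90:4C:xx:xx:xx - BRCM OUI plus 3 random bytes */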
2258 }
2259
2260 /* Attach to the OS network interface */
2261 DHD_TRACE(("%s(): Calling dhd_attach_net() \n", __FUNCTION__));
2262 if (dhd_attach_net(bus->dhd, TRUE)) {
2263 DHD_ERROR(("%s(): ERROR.. dhd_attach_net() failed\n", __FUNCTION__));
2264 break;
2265 }
2266
2267 dhdpcie_init_succeeded = TRUE;
2268 #ifdef CONFIG_ARCH_MSM
2269 sec_pcie_set_use_ep_loaded(bus->rc_dev);
2270 #endif /* CONFIG_ARCH_MSM */
2271 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2272 pm_runtime_set_autosuspend_delay(&pdev->dev, AUTO_SUSPEND_TIMEOUT);
2273 pm_runtime_use_autosuspend(&pdev->dev);
2274 atomic_set(&bus->dhd->block_bus, FALSE);
2275 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2276
2277 #if defined(MULTIPLE_SUPPLICANT)
2278 wl_android_post_init(); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
2279 #endif /* MULTIPLE_SUPPLICANT */
2280
2281 DHD_TRACE(("%s:Exit - SUCCESS \n", __FUNCTION__));
2282 return 0; /* return SUCCESS */
2283
2284 } while (0);
2285 /* reverse the initialization order in case of error */
2286
2287 if (bus)
2288 dhdpcie_bus_release(bus);
2289
2290 #ifdef BCMPCIE_OOB_HOST_WAKE
2291 if (dhdpcie_osinfo) {
2292 MFREE(osh, dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
2293 }
2294 #endif /* BCMPCIE_OOB_HOST_WAKE */
2295
2296 #ifdef USE_SMMU_ARCH_MSM
2297 if (dhdpcie_smmu_info) {
2298 MFREE(osh, dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t));
2299 dhdpcie_info->smmu_cxt = NULL;
2300 }
2301 #endif /* USE_SMMU_ARCH_MSM */
2302
2303 if (dhdpcie_info)
2304 dhdpcie_detach(dhdpcie_info);
2305 if (adapter)
2306 pci_disable_device(pdev);
2307 if (osh)
2308 osl_detach(osh);
2309 if (adapter != NULL) {
2310 adapter->bus_type = -1;
2311 adapter->bus_num = -1;
2312 adapter->slot_num = -1;
2313 }
2314
2315 dhdpcie_init_succeeded = FALSE;
2316
2317 DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
2318
2319 return -1; /* return FAILURE */
2320 }
2321
2322 /* Free Linux irq */
2323 void
2324 dhdpcie_free_irq(dhd_bus_t *bus)
2325 {
2326 struct pci_dev *pdev = NULL;
2327
2328 DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__));
2329 if (bus) {
2330 pdev = bus->dev;
2331 if (bus->irq_registered) {
2332 #if defined(SET_PCIE_IRQ_CPU_CORE) && defined(CONFIG_ARCH_SM8150)
2333 /* clean up the affinity_hint before
2334 * the unregistration of PCIe irq
2335 */
2336 (void)irq_set_affinity_hint(pdev->irq, NULL);
2337 #endif /* SET_PCIE_IRQ_CPU_CORE && CONFIG_ARCH_SM8150 */
2338 free_irq(pdev->irq, bus);
2339 bus->irq_registered = FALSE;
2340 if (bus->d2h_intr_method == PCIE_MSI) {
2341 dhdpcie_disable_msi(pdev);
2342 }
2343 } else {
2344 DHD_ERROR(("%s: PCIe IRQ is not registered\n", __FUNCTION__));
2345 }
2346 }
2347 DHD_TRACE(("%s: Exit\n", __FUNCTION__));
2348 return;
2349 }
2350
2351 /*
2352
2353 Name: dhdpcie_isr
2354
2355 Parameters:
2356
2357 1: IN int irq -- interrupt vector
2358 2: IN void *arg -- handle to private data structure
2359
2360 Return value:
2361
2362 irqreturn_t - IRQ_HANDLED
2363
2364 Description:
2365 The interrupt service routine checks the status register,
2366 disables the interrupt, and queues the DPC if mailbox interrupts are raised.
2367 */
2368
2369 irqreturn_t
2370 dhdpcie_isr(int irq, void *arg)
2371 {
2372 dhd_bus_t *bus = (dhd_bus_t*)arg;
2373 bus->isr_entry_time = OSL_LOCALTIME_NS();
2374 if (!dhdpcie_bus_isr(bus)) {
2375 DHD_LOG_MEM(("%s: dhdpcie_bus_isr returns with FALSE\n", __FUNCTION__));
2376 }
2377 bus->isr_exit_time = OSL_LOCALTIME_NS();
2378 return IRQ_HANDLED;
2379 }
2380
2381 int
2382 dhdpcie_disable_irq_nosync(dhd_bus_t *bus)
2383 {
2384 struct pci_dev *dev;
2385 if ((bus == NULL) || (bus->dev == NULL)) {
2386 DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
2387 return BCME_ERROR;
2388 }
2389
2390 dev = bus->dev;
2391 disable_irq_nosync(dev->irq);
2392 return BCME_OK;
2393 }
2394
2395 int
2396 dhdpcie_disable_irq(dhd_bus_t *bus)
2397 {
2398 struct pci_dev *dev;
2399 if ((bus == NULL) || (bus->dev == NULL)) {
2400 DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
2401 return BCME_ERROR;
2402 }
2403
2404 dev = bus->dev;
2405 disable_irq(dev->irq);
2406 return BCME_OK;
2407 }
2408
2409 int
2410 dhdpcie_enable_irq(dhd_bus_t *bus)
2411 {
2412 struct pci_dev *dev;
2413 if ((bus == NULL) || (bus->dev == NULL)) {
2414 DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
2415 return BCME_ERROR;
2416 }
2417
2418 dev = bus->dev;
2419 enable_irq(dev->irq);
2420 return BCME_OK;
2421 }
2422
2423 int
2424 dhdpcie_irq_disabled(dhd_bus_t *bus)
2425 {
2426 struct irq_desc *desc = NULL;
2427
2428 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0))
2429 desc = irq_data_to_desc(irq_get_irq_data(bus->dev->irq));
2430 #else
2431 desc = irq_to_desc(bus->dev->irq);
2432 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0) */
2433 /* depth will be zero, if enabled */
2434 return desc->depth;
2435 }
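/*
 * Note: desc->depth is the kernel's nested-disable count for an IRQ line;
 * each disable_irq()/disable_irq_nosync() increments it and each enable_irq()
 * decrements it, so a non-zero depth means the IRQ is currently disabled.
 * dhdpcie_request_irq() above uses this to re-enable an IRQ left disabled.
 */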
2436
2437 #if defined(CONFIG_ARCH_EXYNOS)
2438 int pcie_ch_num = EXYNOS_PCIE_CH_NUM;
2439 #endif /* CONFIG_ARCH_EXYNOS */
2440
2441 int
2442 dhdpcie_start_host_dev(dhd_bus_t *bus)
2443 {
2444 int ret = 0;
2445 #ifdef CONFIG_ARCH_MSM
2446 #ifdef SUPPORT_LINKDOWN_RECOVERY
2447 int options = 0;
2448 #endif /* SUPPORT_LINKDOWN_RECOVERY */
2449 #endif /* CONFIG_ARCH_MSM */
2450 DHD_TRACE(("%s Enter:\n", __FUNCTION__));
2451
2452 if (bus == NULL) {
2453 return BCME_ERROR;
2454 }
2455
2456 if (bus->dev == NULL) {
2457 return BCME_ERROR;
2458 }
2459
2460 #ifdef CONFIG_ARCH_EXYNOS
2461 exynos_pcie_pm_resume(pcie_ch_num);
2462 #endif /* CONFIG_ARCH_EXYNOS */
2463 #ifdef CONFIG_ARCH_MSM
2464 #ifdef SUPPORT_LINKDOWN_RECOVERY
2465 if (bus->no_cfg_restore) {
2466 options = MSM_PCIE_CONFIG_NO_CFG_RESTORE;
2467 }
2468 ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
2469 bus->dev, NULL, options);
2470 if (bus->no_cfg_restore && !ret) {
2471 msm_pcie_recover_config(bus->dev);
2472 bus->no_cfg_restore = 0;
2473 }
2474 #else
2475 ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
2476 bus->dev, NULL, 0);
2477 #endif /* SUPPORT_LINKDOWN_RECOVERY */
2478 #endif /* CONFIG_ARCH_MSM */
2479 #ifdef CONFIG_ARCH_TEGRA
2480 ret = tegra_pcie_pm_resume();
2481 #endif /* CONFIG_ARCH_TEGRA */
2482
2483 if (ret) {
2484 DHD_ERROR(("%s Failed to bring up PCIe link\n", __FUNCTION__));
2485 goto done;
2486 }
2487
2488 done:
2489 DHD_TRACE(("%s Exit:\n", __FUNCTION__));
2490 return ret;
2491 }
2492
2493 int
2494 dhdpcie_stop_host_dev(dhd_bus_t *bus)
2495 {
2496 int ret = 0;
2497 #ifdef CONFIG_ARCH_MSM
2498 #ifdef SUPPORT_LINKDOWN_RECOVERY
2499 int options = 0;
2500 #endif /* SUPPORT_LINKDOWN_RECOVERY */
2501 #endif /* CONFIG_ARCH_MSM */
2502
2503 DHD_TRACE(("%s Enter:\n", __FUNCTION__));
2504
2505 if (bus == NULL) {
2506 return BCME_ERROR;
2507 }
2508
2509 if (bus->dev == NULL) {
2510 return BCME_ERROR;
2511 }
2512
2513 #ifdef CONFIG_ARCH_EXYNOS
2514 exynos_pcie_pm_suspend(pcie_ch_num);
2515 #endif /* CONFIG_ARCH_EXYNOS */
2516 #ifdef CONFIG_ARCH_MSM
2517 #ifdef SUPPORT_LINKDOWN_RECOVERY
2518 if (bus->no_cfg_restore) {
2519 options = MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN;
2520 }
2521
2522 ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
2523 bus->dev, NULL, options);
2524 #else
2525 ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
2526 bus->dev, NULL, 0);
2527 #endif /* SUPPORT_LINKDOWN_RECOVERY */
2528 #endif /* CONFIG_ARCH_MSM */
2529 #ifdef CONFIG_ARCH_TEGRA
2530 ret = tegra_pcie_pm_suspend();
2531 #endif /* CONFIG_ARCH_TEGRA */
2532 if (ret) {
2533 DHD_ERROR(("Failed to stop PCIe link\n"));
2534 goto done;
2535 }
2536 done:
2537 DHD_TRACE(("%s Exit:\n", __FUNCTION__));
2538 return ret;
2539 }
2540
2541 int
2542 dhdpcie_disable_device(dhd_bus_t *bus)
2543 {
2544 DHD_TRACE(("%s Enter:\n", __FUNCTION__));
2545
2546 if (bus == NULL) {
2547 return BCME_ERROR;
2548 }
2549
2550 if (bus->dev == NULL) {
2551 return BCME_ERROR;
2552 }
2553
2554 if (pci_is_enabled(bus->dev))
2555 pci_disable_device(bus->dev);
2556
2557 return 0;
2558 }
2559
2560 int
2561 dhdpcie_enable_device(dhd_bus_t *bus)
2562 {
2563 int ret = BCME_ERROR;
2564 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
2565 dhdpcie_info_t *pch;
2566 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
2567
2568 DHD_TRACE(("%s Enter:\n", __FUNCTION__));
2569
2570 if (bus == NULL) {
2571 return BCME_ERROR;
2572 }
2573
2574 if (bus->dev == NULL) {
2575 return BCME_ERROR;
2576 }
2577
2578 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
2579 pch = pci_get_drvdata(bus->dev);
2580 if (pch == NULL) {
2581 return BCME_ERROR;
2582 }
2583
2584 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && \
2585 (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) && !defined(CONFIG_SOC_EXYNOS8890)
2586 /* Use pci_load_and_free_saved_state() to stay compatible
2587 * with kernel versions 3.14.0 to 3.18.41.
2588 */
2589 pci_load_and_free_saved_state(bus->dev, &pch->default_state);
2590 pch->default_state = pci_store_saved_state(bus->dev);
2591 #else
2592 pci_load_saved_state(bus->dev, pch->default_state);
2593 #endif /* LINUX_VERSION >= 3.14.0 && LINUX_VERSION < 3.19.0 && !CONFIG_SOC_EXYNOS8890 */
2594
2595 /* Check if the saved vendor/device ID is still valid */
2596 if (bus->dev->state_saved) {
2597 uint32 vid, saved_vid;
2598 pci_read_config_dword(bus->dev, PCI_CFG_VID, &vid);
2599 saved_vid = bus->dev->saved_config_space[PCI_CFG_VID];
2600 if (vid != saved_vid) {
2601 DHD_ERROR(("%s: VID(0x%x) is different from saved VID(0x%x) "
2602 "Skip the bus init\n", __FUNCTION__, vid, saved_vid));
2603 bus->no_bus_init = TRUE;
2604 /* Check if the PCIe link is down */
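/* (config-space reads return all 1s, i.e. 0xffffffff, on a dead link) */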
2605 if (vid == (uint32)-1) {
2606 bus->is_linkdown = 1;
2607 #ifdef SUPPORT_LINKDOWN_RECOVERY
2608 #ifdef CONFIG_ARCH_MSM
2609 bus->no_cfg_restore = TRUE;
2610 #endif /* CONFIG_ARCH_MSM */
2611 #endif /* SUPPORT_LINKDOWN_RECOVERY */
2612 }
2613 return BCME_ERROR;
2614 }
2615 }
2616
2617 pci_restore_state(bus->dev);
2618 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) */
2619
2620 ret = pci_enable_device(bus->dev);
2621 if (ret) {
2622 pci_disable_device(bus->dev);
2623 } else {
2624 pci_set_master(bus->dev);
2625 }
2626
2627 return ret;
2628 }
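/*
 * Lifecycle sketch (descriptive summary of the code above): the config-space
 * snapshot taken in dhdpcie_get_resource() (pch->default_state) is reloaded
 * here before pci_restore_state(), so a re-enabled device comes back with
 * the same BAR programming it had at probe time.
 */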
2629
2630 int
2631 dhdpcie_alloc_resource(dhd_bus_t *bus)
2632 {
2633 dhdpcie_info_t *dhdpcie_info;
2634 phys_addr_t bar0_addr, bar1_addr;
2635 ulong bar1_size;
2636
2637 do {
2638 if (bus == NULL) {
2639 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2640 break;
2641 }
2642
2643 if (bus->dev == NULL) {
2644 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2645 break;
2646 }
2647
2648 dhdpcie_info = pci_get_drvdata(bus->dev);
2649 if (dhdpcie_info == NULL) {
2650 DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
2651 break;
2652 }
2653
2654 bar0_addr = pci_resource_start(bus->dev, 0); /* Bar-0 mapped address */
2655 bar1_addr = pci_resource_start(bus->dev, 2); /* Bar-1 mapped address */
2656
2657 /* read Bar-1 mapped memory range */
2658 bar1_size = pci_resource_len(bus->dev, 2);
2659
2660 if ((bar1_size == 0) || (bar1_addr == 0)) {
2661 printf("%s: BAR1 not enabled for this device: size(%ld),"
2662 " addr(0x"PRINTF_RESOURCE")\n",
2663 __FUNCTION__, bar1_size, bar1_addr);
2664 break;
2665 }
2666
2667 dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
2668 if (!dhdpcie_info->regs) {
2669 DHD_ERROR(("%s: ioremap() for regs failed\n", __FUNCTION__));
2670 break;
2671 }
2672
2673 bus->regs = dhdpcie_info->regs;
2674 dhdpcie_info->bar1_size =
2675 (bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
2676 dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->bar1_size);
2677 if (!dhdpcie_info->tcm) {
2678 DHD_ERROR(("%s: ioremap() for tcm failed\n", __FUNCTION__));
2679 REG_UNMAP(dhdpcie_info->regs);
2680 bus->regs = NULL;
2681 break;
2682 }
2683
2684 bus->tcm = dhdpcie_info->tcm;
2685 bus->bar1_size = dhdpcie_info->bar1_size;
2686
2687 DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
2688 __FUNCTION__, dhdpcie_info->regs, bar0_addr));
2689 DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
2690 __FUNCTION__, dhdpcie_info->tcm, bar1_addr));
2691
2692 return 0;
2693 } while (0);
2694
2695 return BCME_ERROR;
2696 }
2697
2698 void
2699 dhdpcie_free_resource(dhd_bus_t *bus)
2700 {
2701 dhdpcie_info_t *dhdpcie_info;
2702
2703 if (bus == NULL) {
2704 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2705 return;
2706 }
2707
2708 if (bus->dev == NULL) {
2709 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2710 return;
2711 }
2712
2713 dhdpcie_info = pci_get_drvdata(bus->dev);
2714 if (dhdpcie_info == NULL) {
2715 DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
2716 return;
2717 }
2718
2719 if (bus->regs) {
2720 REG_UNMAP(dhdpcie_info->regs);
2721 bus->regs = NULL;
2722 }
2723
2724 if (bus->tcm) {
2725 REG_UNMAP(dhdpcie_info->tcm);
2726 bus->tcm = NULL;
2727 }
2728 }
2729
2730 int
2731 dhdpcie_bus_request_irq(struct dhd_bus *bus)
2732 {
2733 dhdpcie_info_t *dhdpcie_info;
2734 int ret = 0;
2735
2736 if (bus == NULL) {
2737 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2738 return BCME_ERROR;
2739 }
2740
2741 if (bus->dev == NULL) {
2742 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2743 return BCME_ERROR;
2744 }
2745
2746 dhdpcie_info = pci_get_drvdata(bus->dev);
2747 if (dhdpcie_info == NULL) {
2748 DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
2749 return BCME_ERROR;
2750 }
2751
2752 if (bus->intr) {
2753 /* Register interrupt callback, but mask it (not operational yet). */
2754 DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
2755 bus->intr_enabled = FALSE;
2756 dhdpcie_bus_intr_disable(bus);
2757 ret = dhdpcie_request_irq(dhdpcie_info);
2758 if (ret) {
2759 DHD_ERROR(("%s: request_irq() failed, ret=%d\n",
2760 __FUNCTION__, ret));
2761 return ret;
2762 }
2763 }
2764
2765 return ret;
2766 }
2767
2768 #ifdef BCMPCIE_OOB_HOST_WAKE
2769 #ifdef CONFIG_BCMDHD_GET_OOB_STATE
2770 extern int dhd_get_wlan_oob_gpio(void);
2771 #endif /* CONFIG_BCMDHD_GET_OOB_STATE */
2772
2773 int dhdpcie_get_oob_irq_level(void)
2774 {
2775 int gpio_level;
2776
2777 #ifdef CONFIG_BCMDHD_GET_OOB_STATE
2778 gpio_level = dhd_get_wlan_oob_gpio();
2779 #else
2780 gpio_level = BCME_UNSUPPORTED;
2781 #endif /* CONFIG_BCMDHD_GET_OOB_STATE */
2782 return gpio_level;
2783 }
2784
2785 int dhdpcie_get_oob_irq_status(struct dhd_bus *bus)
2786 {
2787 dhdpcie_info_t *pch;
2788 dhdpcie_os_info_t *dhdpcie_osinfo;
2789
2790 if (bus == NULL) {
2791 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2792 return 0;
2793 }
2794
2795 if (bus->dev == NULL) {
2796 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2797 return 0;
2798 }
2799
2800 pch = pci_get_drvdata(bus->dev);
2801 if (pch == NULL) {
2802 DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
2803 return 0;
2804 }
2805
2806 dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
2807
2808 return dhdpcie_osinfo ? dhdpcie_osinfo->oob_irq_enabled : 0;
2809 }
2810
2811 int dhdpcie_get_oob_irq_num(struct dhd_bus *bus)
2812 {
2813 dhdpcie_info_t *pch;
2814 dhdpcie_os_info_t *dhdpcie_osinfo;
2815
2816 if (bus == NULL) {
2817 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2818 return 0;
2819 }
2820
2821 if (bus->dev == NULL) {
2822 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2823 return 0;
2824 }
2825
2826 pch = pci_get_drvdata(bus->dev);
2827 if (pch == NULL) {
2828 DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
2829 return 0;
2830 }
2831
2832 dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
2833
2834 return dhdpcie_osinfo ? dhdpcie_osinfo->oob_irq_num : 0;
2835 }
2836
2837 void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable)
2838 {
2839 unsigned long flags;
2840 dhdpcie_info_t *pch;
2841 dhdpcie_os_info_t *dhdpcie_osinfo;
2842
2843 if (bus == NULL) {
2844 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2845 return;
2846 }
2847
2848 if (bus->dev == NULL) {
2849 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2850 return;
2851 }
2852
2853 pch = pci_get_drvdata(bus->dev);
2854 if (pch == NULL) {
2855 DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
2856 return;
2857 }
2858
2859 dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
2860 DHD_OOB_IRQ_LOCK(&dhdpcie_osinfo->oob_irq_spinlock, flags);
2861 if ((dhdpcie_osinfo->oob_irq_enabled != enable) &&
2862 (dhdpcie_osinfo->oob_irq_num > 0)) {
2863 if (enable) {
2864 enable_irq(dhdpcie_osinfo->oob_irq_num);
2865 bus->oob_intr_enable_count++;
2866 bus->last_oob_irq_enable_time = OSL_LOCALTIME_NS();
2867 } else {
2868 disable_irq_nosync(dhdpcie_osinfo->oob_irq_num);
2869 bus->oob_intr_disable_count++;
2870 bus->last_oob_irq_disable_time = OSL_LOCALTIME_NS();
2871 }
2872 dhdpcie_osinfo->oob_irq_enabled = enable;
2873 }
2874 DHD_OOB_IRQ_UNLOCK(&dhdpcie_osinfo->oob_irq_spinlock, flags);
2875 }
2876
2877 #if defined(DHD_USE_SPIN_LOCK_BH) && !defined(DHD_USE_PCIE_OOB_THREADED_IRQ)
2878 #error "Cannot enable DHD_USE_SPIN_LOCK_BH without enabling DHD_USE_PCIE_OOB_THREADED_IRQ"
2879 #endif /* DHD_USE_SPIN_LOCK_BH && !DHD_USE_PCIE_OOB_THREADED_IRQ */
2880
2881 #ifdef DHD_USE_PCIE_OOB_THREADED_IRQ
2882 static irqreturn_t wlan_oob_irq_isr(int irq, void *data)
2883 {
2884 dhd_bus_t *bus = (dhd_bus_t *)data;
2885 DHD_TRACE(("%s: IRQ ISR\n", __FUNCTION__));
2886 bus->last_oob_irq_isr_time = OSL_LOCALTIME_NS();
2887 return IRQ_WAKE_THREAD;
2888 }
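/* The hard handler above only timestamps and returns IRQ_WAKE_THREAD; the
 * kernel then invokes wlan_oob_irq() below in thread context, since it is
 * passed as the thread_fn of request_threaded_irq() in
 * dhdpcie_oob_intr_register().
 */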
2889 #endif /* DHD_USE_PCIE_OOB_THREADED_IRQ */
2890
2891 static irqreturn_t wlan_oob_irq(int irq, void *data)
2892 {
2893 dhd_bus_t *bus;
2894 bus = (dhd_bus_t *)data;
2895 dhdpcie_oob_intr_set(bus, FALSE);
2896 #ifdef DHD_USE_PCIE_OOB_THREADED_IRQ
2897 DHD_TRACE(("%s: IRQ Thread\n", __FUNCTION__));
2898 bus->last_oob_irq_thr_time = OSL_LOCALTIME_NS();
2899 #else
2900 DHD_TRACE(("%s: IRQ ISR\n", __FUNCTION__));
2901 bus->last_oob_irq_isr_time = OSL_LOCALTIME_NS();
2902 #endif /* DHD_USE_PCIE_OOB_THREADED_IRQ */
2903
2904 if (bus->dhd->up == 0) {
2905 DHD_ERROR(("%s: ########### OOB IRQ raised while dhd pub 'up' is 0 ############\n",
2906 __FUNCTION__));
2907 }
2908
2909 bus->oob_intr_count++;
2910 #ifdef DHD_WAKE_STATUS
2911 #ifdef DHD_PCIE_RUNTIMEPM
2912 /* Avoid counting wake-ups that were caused by Runtime PM itself */
2913 if (bus->chk_pm)
2914 #endif /* DHD_PCIE_RUNTIMEPM */
2915 {
2916 bcmpcie_set_get_wake(bus, 1);
2917 }
2918 #endif /* DHD_WAKE_STATUS */
2919 #ifdef DHD_PCIE_RUNTIMEPM
2920 dhdpcie_runtime_bus_wake(bus->dhd, FALSE, wlan_oob_irq);
2921 #endif /* DHD_PCIE_RUNTIMEPM */
2922 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2923 dhd_bus_wakeup_work(bus->dhd);
2924 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2925 /* Hold a wakelock if bus_low_power_state is
2926 * DHD_BUS_D3_INFORM_SENT or DHD_BUS_D3_ACK_RECEIVED
2927 */
2928 if (bus->dhd->up && DHD_CHK_BUS_IN_LPS(bus)) {
2929 DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(bus->dhd, OOB_WAKE_LOCK_TIMEOUT);
2930 }
2931 return IRQ_HANDLED;
2932 }
2933
2934 int dhdpcie_oob_intr_register(dhd_bus_t *bus)
2935 {
2936 int err = 0;
2937 dhdpcie_info_t *pch;
2938 dhdpcie_os_info_t *dhdpcie_osinfo;
2939
2940 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2941 if (bus == NULL) {
2942 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
2943 return -EINVAL;
2944 }
2945
2946 if (bus->dev == NULL) {
2947 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
2948 return -EINVAL;
2949 }
2950
2951 pch = pci_get_drvdata(bus->dev);
2952 if (pch == NULL) {
2953 DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
2954 return -EINVAL;
2955 }
2956
2957 dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
2958 if (dhdpcie_osinfo->oob_irq_registered) {
2959 DHD_ERROR(("%s: irq is already registered\n", __FUNCTION__));
2960 return -EBUSY;
2961 }
2962
2963 if (dhdpcie_osinfo->oob_irq_num > 0) {
2964 printf("%s OOB irq=%d flags=0x%X\n", __FUNCTION__,
2965 (int)dhdpcie_osinfo->oob_irq_num,
2966 (int)dhdpcie_osinfo->oob_irq_flags);
2967 #ifdef DHD_USE_PCIE_OOB_THREADED_IRQ
2968 err = request_threaded_irq(dhdpcie_osinfo->oob_irq_num,
2969 wlan_oob_irq_isr, wlan_oob_irq,
2970 dhdpcie_osinfo->oob_irq_flags, "dhdpcie_host_wake",
2971 bus);
2972 #else
2973 err = request_irq(dhdpcie_osinfo->oob_irq_num, wlan_oob_irq,
2974 dhdpcie_osinfo->oob_irq_flags, "dhdpcie_host_wake",
2975 bus);
2976 #endif /* DHD_USE_PCIE_OOB_THREADED_IRQ */
2977 if (err) {
2978 DHD_ERROR(("%s: request_irq failed with %d\n",
2979 __FUNCTION__, err));
2980 return err;
2981 }
2982 #if defined(DISABLE_WOWLAN)
2983 printf("%s: disable_irq_wake\n", __FUNCTION__);
2984 dhdpcie_osinfo->oob_irq_wake_enabled = FALSE;
2985 #else
2986 printf("%s: enable_irq_wake\n", __FUNCTION__);
2987 err = enable_irq_wake(dhdpcie_osinfo->oob_irq_num);
2988 if (!err) {
2989 dhdpcie_osinfo->oob_irq_wake_enabled = TRUE;
2990 } else
2991 printf("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err);
2992 #endif
2993 dhdpcie_osinfo->oob_irq_enabled = TRUE;
2994 }
2995
2996 dhdpcie_osinfo->oob_irq_registered = TRUE;
2997
2998 return 0;
2999 }
3000
3001 void dhdpcie_oob_intr_unregister(dhd_bus_t *bus)
3002 {
3003 int err = 0;
3004 dhdpcie_info_t *pch;
3005 dhdpcie_os_info_t *dhdpcie_osinfo;
3006
3007 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3008 if (bus == NULL) {
3009 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
3010 return;
3011 }
3012
3013 if (bus->dev == NULL) {
3014 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
3015 return;
3016 }
3017
3018 pch = pci_get_drvdata(bus->dev);
3019 if (pch == NULL) {
3020 DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
3021 return;
3022 }
3023
3024 dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
3025 if (!dhdpcie_osinfo->oob_irq_registered) {
3026 DHD_ERROR(("%s: irq is not registered\n", __FUNCTION__));
3027 return;
3028 }
3029 if (dhdpcie_osinfo->oob_irq_num > 0) {
3030 if (dhdpcie_osinfo->oob_irq_wake_enabled) {
3031 err = disable_irq_wake(dhdpcie_osinfo->oob_irq_num);
3032 if (!err) {
3033 dhdpcie_osinfo->oob_irq_wake_enabled = FALSE;
3034 }
3035 }
3036 if (dhdpcie_osinfo->oob_irq_enabled) {
3037 disable_irq(dhdpcie_osinfo->oob_irq_num);
3038 dhdpcie_osinfo->oob_irq_enabled = FALSE;
3039 }
3040 free_irq(dhdpcie_osinfo->oob_irq_num, bus);
3041 }
3042 dhdpcie_osinfo->oob_irq_registered = FALSE;
3043 }
3044 #endif /* BCMPCIE_OOB_HOST_WAKE */
3045
3046 #ifdef PCIE_OOB
3047 void dhdpcie_oob_init(dhd_bus_t *bus)
3048 {
3049 /* XXX this should be passed in as a command line parameter */
3050 gpio_handle_val = get_handle(OOB_PORT);
3051 if (gpio_handle_val < 0)
3052 {
3053 DHD_ERROR(("%s: Could not get GPIO handle.\n", __FUNCTION__));
3054 ASSERT(FALSE);
3055 }
3056
3057 gpio_direction = 0;
3058 ftdi_set_bitmode(gpio_handle_val, 0, BITMODE_BITBANG);
3059
3060 /* Note BT core is also enabled here */
3061 gpio_port = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE;
3062 gpio_write_port(gpio_handle_val, gpio_port);
3063
3064 gpio_direction = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE;
3065 ftdi_set_bitmode(gpio_handle_val, gpio_direction, BITMODE_BITBANG);
3066
3067 bus->oob_enabled = TRUE;
3068 bus->oob_presuspend = FALSE;
3069
3070 /* drive the Device_Wake GPIO low on startup */
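/* device_wake_state is forced to TRUE first so that the FALSE request below
 * is seen as a state change and actually drives the GPIO low (see the
 * state-change guard in dhd_os_oob_set_device_wake()).
 */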
3071 bus->device_wake_state = TRUE;
3072 dhd_bus_set_device_wake(bus, FALSE);
3073 dhd_bus_doorbell_timeout_reset(bus);
3074
3075 }
3076
3077 void
3078 dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val)
3079 {
3080 DHD_INFO(("Set BT_REG_ON to %d\n", val));
3081 if (val)
3082 {
3083 gpio_port = gpio_port | (1 << BIT_BT_REG_ON);
3084 gpio_write_port(gpio_handle_val, gpio_port);
3085 } else {
3086 gpio_port = gpio_port & (0xff ^ (1 << BIT_BT_REG_ON));
3087 gpio_write_port(gpio_handle_val, gpio_port);
3088 }
3089 }
3090
3091 int
3092 dhd_oob_get_bt_reg_on(struct dhd_bus *bus)
3093 {
3094 int ret;
3095 uint8 val;
3096 ret = gpio_read_port(gpio_handle_val, &val);
3097
3098 if (ret < 0) {
3099 /* XXX handle error properly */
3100 DHD_ERROR(("gpio_read_port returns %d\n", ret));
3101 return ret;
3102 }
3103
3104 if (val & (1 << BIT_BT_REG_ON))
3105 {
3106 ret = 1;
3107 } else {
3108 ret = 0;
3109 }
3110
3111 return ret;
3112 }
3113
3114 int
3115 dhd_os_oob_set_device_wake(struct dhd_bus *bus, bool val)
3116 {
3117 if (bus->device_wake_state != val)
3118 {
3119 DHD_INFO(("Set Device_Wake to %d\n", val));
3120
3121 if (bus->oob_enabled && !bus->oob_presuspend)
3122 {
3123 if (val)
3124 {
3125 gpio_port = gpio_port | (1 << DEVICE_WAKE);
3126 gpio_write_port_non_block(gpio_handle_val, gpio_port);
3127 } else {
3128 gpio_port = gpio_port & (0xff ^ (1 << DEVICE_WAKE));
3129 gpio_write_port_non_block(gpio_handle_val, gpio_port);
3130 }
3131 }
3132
3133 bus->device_wake_state = val;
3134 }
3135 return BCME_OK;
3136 }
3137
3138 INLINE void
3139 dhd_os_ib_set_device_wake(struct dhd_bus *bus, bool val)
3140 {
3141 /* TODO: An inband implementation of Device_Wake is not currently supported,
3142 * so this function is left empty; later it can be used to support that.
3143 */
3144 }
3145 #endif /* PCIE_OOB */
3146
3147 #ifdef DHD_PCIE_RUNTIMEPM
3148 bool dhd_runtimepm_state(dhd_pub_t *dhd)
3149 {
3150 dhd_bus_t *bus;
3151 unsigned long flags;
3152 bus = dhd->bus;
3153
3154 DHD_GENERAL_LOCK(dhd, flags);
3155 bus->idlecount++;
3156
3157 DHD_TRACE(("%s : Enter \n", __FUNCTION__));
3158
3159 if (dhd_query_bus_erros(dhd)) {
3160 /* Because of a bus error, dongle trap, etc.,
3161 * the driver does not allow entering suspend, so return FALSE
3162 */
3163 DHD_GENERAL_UNLOCK(dhd, flags);
3164 return FALSE;
3165 }
3166
3167 if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) {
3168 bus->idlecount = 0;
3169 if (DHD_BUS_BUSY_CHECK_IDLE(dhd) && !DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd) &&
3170 !DHD_CHECK_CFG_IN_PROGRESS(dhd) && !dhd_os_check_wakelock_all(bus->dhd)) {
3171 DHD_RPM(("%s: DHD Idle state!! - idletime :%d, wdtick :%d \n",
3172 __FUNCTION__, bus->idletime, dhd_runtimepm_ms));
3173 bus->bus_wake = 0;
3174 DHD_BUS_BUSY_SET_RPM_SUSPEND_IN_PROGRESS(dhd);
3175 bus->runtime_resume_done = FALSE;
3176 /* stop all interface network queue. */
3177 dhd_bus_stop_queue(bus);
3178 DHD_GENERAL_UNLOCK(dhd, flags);
3179 /* If RPM suspend fails, return FALSE so that it is retried */
3180 if (dhdpcie_set_suspend_resume(bus, TRUE)) {
3181 DHD_ERROR(("%s: exit with wakelock \n", __FUNCTION__));
3182 DHD_GENERAL_LOCK(dhd, flags);
3183 DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd);
3184 dhd_os_busbusy_wake(bus->dhd);
3185 bus->runtime_resume_done = TRUE;
3186 /* Without this, the NET TX queue can get stuck */
3187 dhd_bus_start_queue(bus);
3188 DHD_GENERAL_UNLOCK(dhd, flags);
3189 if (bus->dhd->rx_pending_due_to_rpm) {
3190 /* Reschedule tasklet to process Rx frames */
3191 DHD_ERROR(("%s: Schedule DPC to process pending"
3192 " Rx packets\n", __FUNCTION__));
3193 /* irq will be enabled at the end of dpc */
3194 dhd_schedule_delayed_dpc_on_dpc_cpu(bus->dhd, 0);
3195 } else {
3196 /* enabling host irq deferred from system suspend */
3197 if (dhdpcie_irq_disabled(bus)) {
3198 dhdpcie_enable_irq(bus);
3199 /* increase the interrupt count when it is enabled */
3200 bus->resume_intr_enable_count++;
3201 }
3202 }
3203 smp_wmb();
3204 wake_up(&bus->rpm_queue);
3205 return FALSE;
3206 }
3207
3208 DHD_GENERAL_LOCK(dhd, flags);
3209 DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd);
3210 DHD_BUS_BUSY_SET_RPM_SUSPEND_DONE(dhd);
3211 /* For making sure NET TX Queue active */
3212 dhd_bus_start_queue(bus);
3213 DHD_GENERAL_UNLOCK(dhd, flags);
3214
3215 wait_event(bus->rpm_queue, bus->bus_wake);
3216
3217 DHD_GENERAL_LOCK(dhd, flags);
3218 DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_DONE(dhd);
3219 DHD_BUS_BUSY_SET_RPM_RESUME_IN_PROGRESS(dhd);
3220 DHD_GENERAL_UNLOCK(dhd, flags);
3221
3222 dhdpcie_set_suspend_resume(bus, FALSE);
3223
3224 DHD_GENERAL_LOCK(dhd, flags);
3225 DHD_BUS_BUSY_CLEAR_RPM_RESUME_IN_PROGRESS(dhd);
3226 dhd_os_busbusy_wake(bus->dhd);
3227 /* Inform the wake up context that Resume is over */
3228 bus->runtime_resume_done = TRUE;
3229 /* For making sure NET TX Queue active */
3230 dhd_bus_start_queue(bus);
3231 DHD_GENERAL_UNLOCK(dhd, flags);
3232
3233 if (bus->dhd->rx_pending_due_to_rpm) {
3234 /* Reschedule tasklet to process Rx frames */
3235 DHD_ERROR(("%s: Schedule DPC to process pending Rx packets\n",
3236 __FUNCTION__));
3237 bus->rpm_sched_dpc_time = OSL_LOCALTIME_NS();
3238 dhd_sched_dpc(bus->dhd);
3239 }
3240
3241 /* enabling host irq deferred from system suspend */
3242 if (dhdpcie_irq_disabled(bus)) {
3243 dhdpcie_enable_irq(bus);
3244 /* increase the interrupt count when it is enabled */
3245 bus->resume_intr_enable_count++;
3246 }
3247
3248 smp_wmb();
3249 wake_up(&bus->rpm_queue);
3250 DHD_RPM(("%s : runtime resume ended \n", __FUNCTION__));
3251 return TRUE;
3252 } else {
3253 DHD_GENERAL_UNLOCK(dhd, flags);
3254 /* Since one of the contexts is busy (TX, IOVAR or RX),
3255 * we should not suspend
3256 */
3257 DHD_ERROR(("%s : bus is active with dhd_bus_busy_state = 0x%x\n",
3258 __FUNCTION__, dhd->dhd_bus_busy_state));
3259 return FALSE;
3260 }
3261 }
3262
3263 DHD_GENERAL_UNLOCK(dhd, flags);
3264 return FALSE;
3265 } /* dhd_runtimepm_state */
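/*
 * State-machine summary (descriptive only): dhd_runtimepm_state() is invoked
 * periodically (per the wdtick interval logged above); idlecount accumulates
 * until it reaches idletime, at which point the bus is suspended and this
 * thread sleeps on rpm_queue until dhd_runtime_bus_wake() sets bus_wake;
 * the bus is then resumed and the NET TX queues restarted.
 */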
3266
3267 /*
3268 * dhd_runtime_bus_wake
3269 * TRUE - the call is related to the runtime PM context
3270 * FALSE - the call is not involved in the runtime PM context
3271 */
3272 bool dhd_runtime_bus_wake(dhd_bus_t *bus, bool wait, void *func_addr)
3273 {
3274 unsigned long flags;
3275 bus->idlecount = 0;
3276 DHD_TRACE(("%s : enter\n", __FUNCTION__));
3277 if (bus->dhd->up == FALSE) {
3278 DHD_INFO(("%s : dhd is not up\n", __FUNCTION__));
3279 return FALSE;
3280 }
3281
3282 DHD_GENERAL_LOCK(bus->dhd, flags);
3283 if (DHD_BUS_BUSY_CHECK_RPM_ALL(bus->dhd)) {
3284 /* Wake up the RPM state thread if suspend is in progress or already done */
3285 if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(bus->dhd) ||
3286 DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
3287 bus->bus_wake = 1;
3288
3289 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3290
3291 if (dhd_msg_level & DHD_RPM_VAL)
3292 DHD_ERROR_RLMT(("%s: Runtime Resume is called in %pf\n", __FUNCTION__, func_addr));
3293 smp_wmb();
3294 wake_up(&bus->rpm_queue);
3295 /* No need to wake up the RPM state thread */
3296 } else if (DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(bus->dhd)) {
3297 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3298 }
3299
3300 /* If wait is TRUE, the caller waits here until runtime resume is done */
3301 if (wait) {
3302 if (!wait_event_timeout(bus->rpm_queue, bus->runtime_resume_done,
3303 msecs_to_jiffies(RPM_WAKE_UP_TIMEOUT))) {
3304 DHD_ERROR(("%s: RPM_WAKE_UP_TIMEOUT error\n", __FUNCTION__));
3305 return FALSE;
3306 }
3307 } else {
3308 DHD_INFO(("%s: bus wakeup but no wait until resume done\n", __FUNCTION__));
3309 }
3310 /* If it is called from RPM context, it returns TRUE */
3311 return TRUE;
3312 }
3313
3314 DHD_GENERAL_UNLOCK(bus->dhd, flags);
3315
3316 return FALSE;
3317 }
3318
3319 bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void *func_addr)
3320 {
3321 dhd_bus_t *bus = dhdp->bus;
3322 return dhd_runtime_bus_wake(bus, wait, func_addr);
3323 }
3324
3325 void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp)
3326 {
3327 dhd_bus_t *bus = dhdp->bus;
3328 bus->idletime = 0;
3329 }
3330
3331 bool dhdpcie_is_resume_done(dhd_pub_t *dhdp)
3332 {
3333 dhd_bus_t *bus = dhdp->bus;
3334 return bus->runtime_resume_done;
3335 }
3336 #endif /* DHD_PCIE_RUNTIMEPM */
3337
3338 struct device *dhd_bus_to_dev(dhd_bus_t *bus)
3339 {
3340 struct pci_dev *pdev;
3341 pdev = bus->dev;
3342
3343 if (pdev)
3344 return &pdev->dev;
3345 else
3346 return NULL;
3347 }
3348
3349 #ifdef DHD_FW_COREDUMP
3350 int
3351 dhd_dongle_mem_dump(void)
3352 {
3353 if (!g_dhd_bus) {
3354 DHD_ERROR(("%s: Bus is NULL\n", __FUNCTION__));
3355 return -ENODEV;
3356 }
3357
3358 dhd_bus_dump_console_buffer(g_dhd_bus);
3359 dhd_prot_debug_info_print(g_dhd_bus->dhd);
3360
3361 g_dhd_bus->dhd->memdump_enabled = DUMP_MEMFILE_BUGON;
3362 g_dhd_bus->dhd->memdump_type = DUMP_TYPE_AP_ABNORMAL_ACCESS;
3363
3364 #ifdef DHD_PCIE_RUNTIMEPM
3365 dhdpcie_runtime_bus_wake(g_dhd_bus->dhd, TRUE, __builtin_return_address(0));
3366 #endif /* DHD_PCIE_RUNTIMEPM */
3367
3368 dhd_bus_mem_dump(g_dhd_bus->dhd);
3369 return 0;
3370 }
3371 #ifndef BCMDHD_MDRIVER
3372 EXPORT_SYMBOL(dhd_dongle_mem_dump);
3373 #endif
3374 #endif /* DHD_FW_COREDUMP */
3375
3376 #ifdef CONFIG_ARCH_MSM
3377 void
3378 dhd_bus_inform_ep_loaded_to_rc(dhd_pub_t *dhdp, bool up)
3379 {
3380 sec_pcie_set_ep_driver_loaded(dhdp->bus->rc_dev, up);
3381 }
3382 #endif /* CONFIG_ARCH_MSM */
3383
3384 bool
3385 dhd_bus_check_driver_up(void)
3386 {
3387 dhd_bus_t *bus;
3388 dhd_pub_t *dhdp;
3389 bool isup = FALSE;
3390
3391 bus = (dhd_bus_t *)g_dhd_bus;
3392 if (!bus) {
3393 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
3394 return isup;
3395 }
3396
3397 dhdp = bus->dhd;
3398 if (dhdp) {
3399 isup = dhdp->up;
3400 }
3401
3402 return isup;
3403 }
3404 #ifndef BCMDHD_MDRIVER
3405 EXPORT_SYMBOL(dhd_bus_check_driver_up);
3406 #endif
3407