1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Linux DHD Bus Module for PCIE
4 *
5 * Copyright (C) 1999-2017, Broadcom Corporation
6 *
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
12 *
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
20 *
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
24 *
25 *
26 * <<Broadcom-WL-IPTag/Open:>>
27 *
28 * $Id: dhd_pcie_linux.c 707536 2017-06-28 04:23:48Z $
29 */
30
31
32 /* include files */
33 #include <typedefs.h>
34 #include <bcmutils.h>
35 #include <bcmdevs.h>
36 #include <siutils.h>
37 #include <hndsoc.h>
38 #include <hndpmu.h>
39 #include <sbchipc.h>
40 #if defined(DHD_DEBUG)
41 #include <hnd_armtrap.h>
42 #include <hnd_cons.h>
43 #endif /* defined(DHD_DEBUG) */
44 #include <dngl_stats.h>
45 #include <pcie_core.h>
46 #include <dhd.h>
47 #include <dhd_bus.h>
48 #include <dhd_proto.h>
49 #include <dhd_dbg.h>
50 #include <dhdioctl.h>
51 #include <bcmmsgbuf.h>
52 #include <pcicfg.h>
53 #include <dhd_pcie.h>
54 #include <dhd_linux.h>
55 #ifdef CONFIG_ARCH_MSM
56 #if defined(CONFIG_PCI_MSM) || defined(CONFIG_ARCH_MSM8996)
57 #include <linux/msm_pcie.h>
58 #else
59 #include <mach/msm_pcie.h>
60 #endif /* CONFIG_PCI_MSM */
61 #endif /* CONFIG_ARCH_MSM */
62 #ifdef PCIE_OOB
63 #include "ftdi_sio_external.h"
64 #endif /* PCIE_OOB */
65 #include <linux/irq.h>
66 #ifdef USE_SMMU_ARCH_MSM
67 #include <asm/dma-iommu.h>
68 #include <linux/iommu.h>
69 #include <linux/of.h>
70 #include <linux/platform_device.h>
71 #endif /* USE_SMMU_ARCH_MSM */
72
#define PCI_CFG_RETRY 10 /* retry budget for PCI config-space accesses */
#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
#define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */

/* Zero the 32-byte DHD packet tag kept in the sk_buff control buffer
 * (skb->cb), as eight 32-bit stores. The ASSERT catches OSL_PKTTAG_SZ
 * changing without this macro being updated to match.
 */
#define OSL_PKTTAG_CLEAR(p) \
do { \
	struct sk_buff *s = (struct sk_buff *)(p); \
	ASSERT(OSL_PKTTAG_SZ == 32); \
	*(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
	*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
	*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
	*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
} while (0)

#ifdef PCIE_OOB
#define HOST_WAKE 4 /* GPIO_0 (HOST_WAKE) - Output from WLAN */
#define DEVICE_WAKE 5 /* GPIO_1 (DEVICE_WAKE) - Input to WLAN */
#define BIT_WL_REG_ON 6
#define BIT_BT_REG_ON 7

/* Cached state of the FTDI GPIO expander used for out-of-band signalling */
int gpio_handle_val = 0;
unsigned char gpio_port = 0;
unsigned char gpio_direction = 0;
#define OOB_PORT "ttyUSB0" /* tty device that carries the OOB GPIO lines */
#endif /* PCIE_OOB */
98
99 /* user defined data structures */
100
/* Snapshot of the two PCI BARs assigned to the dongle: BAR0 (registers)
 * and BAR1 (device memory / TCM). Sizes are in bytes.
 */
typedef struct dhd_pc_res {
	uint32 bar0_size; /* length of the BAR0 window */
	void* bar0_addr; /* mapped address of BAR0 */
	uint32 bar1_size; /* length of the BAR1 window */
	void* bar1_addr; /* mapped address of BAR1 */
} pci_config_res, *pPci_config_res;
107
/* Generic bus-layer callback taking an opaque context */
typedef bool (*dhdpcie_cb_fn_t)(void *);

/* Per-device driver state; stored as the pci_dev's drvdata and handed
 * back by pci_get_drvdata() throughout this file.
 */
typedef struct dhdpcie_info
{
	dhd_bus_t *bus; /* back-pointer to the DHD bus instance */
	osl_t *osh; /* OS abstraction handle used for MFREE etc. */
	struct pci_dev *dev; /* pci device handle */
	volatile char *regs; /* pci device memory va (BAR0 registers) */
	volatile char *tcm; /* pci device memory va (BAR1 TCM) */
	uint32 tcm_size; /* pci device memory size */
	struct pcos_info *pcos_info;
	uint16 last_intrstatus; /* to cache intrstatus */
	int irq;
	char pciname[32]; /* "dhdpcie:<pci name>" used for request_irq */
	struct pci_saved_state* default_state; /* config snapshot, freed in dhdpcie_detach */
	struct pci_saved_state* state; /* config snapshot taken at suspend, consumed at resume */
#ifdef BCMPCIE_OOB_HOST_WAKE
	void *os_cxt; /* Pointer to per-OS private data */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_WAKE_STATUS
	spinlock_t pcie_lock; /* protects the wake counters below */
	unsigned int total_wake_count;
	int pkt_wake;
	int wake_irq;
#endif /* DHD_WAKE_STATUS */
#ifdef USE_SMMU_ARCH_MSM
	void *smmu_cxt; /* dhdpcie_smmu_info_t set up by dhdpcie_smmu_init */
#endif /* USE_SMMU_ARCH_MSM */
} dhdpcie_info_t;
137
138
/* OS-side helpers hung off dhdpcie_info_t::pcos_info */
struct pcos_info {
	dhdpcie_info_t *pc; /* back-pointer to the owning dhdpcie_info */
	spinlock_t lock;
	wait_queue_head_t intr_wait_queue;
	timer_list_compat_t tuning_timer; /* drives tuning_tasklet below */
	int tuning_timer_exp;
	atomic_t timer_enab; /* presumably gates re-arming of tuning_timer — TODO confirm */
	struct tasklet_struct tuning_tasklet;
};
148
#ifdef BCMPCIE_OOB_HOST_WAKE
/* Out-of-band host-wake interrupt bookkeeping, one instance per device */
typedef struct dhdpcie_os_info {
	int oob_irq_num; /* valid when hardware or software oob in use */
	unsigned long oob_irq_flags; /* valid when hardware or software oob in use */
	bool oob_irq_registered;
	bool oob_irq_enabled;
	bool oob_irq_wake_enabled; /* irq armed as a system wakeup source */
	spinlock_t oob_irq_spinlock; /* serializes enable/disable transitions */
	void *dev; /* handle to the underlying device */
} dhdpcie_os_info_t;
/* top-half handler for the OOB host-wake interrupt */
static irqreturn_t wlan_oob_irq(int irq, void *data);
#if defined(CUSTOMER_HW2) && defined(CONFIG_ARCH_APQ8084)
extern struct brcm_pcie_wake brcm_pcie_wake;
#endif /* CUSTOMER_HW2 && CONFIG_ARCH_APQ8084 */
#endif /* BCMPCIE_OOB_HOST_WAKE */
164
#ifdef USE_SMMU_ARCH_MSM
/* IOMMU (SMMU) mapping state for MSM targets; filled by dhdpcie_smmu_init
 * from the "wlan-smmu-iova-address" device-tree property.
 */
typedef struct dhdpcie_smmu_info {
	struct dma_iommu_mapping *smmu_mapping; /* NULL until init succeeds */
	dma_addr_t smmu_iova_start; /* first IOVA of the mapping window */
	size_t smmu_iova_len; /* length of the IOVA window in bytes */
} dhdpcie_smmu_info_t;
#endif /* USE_SMMU_ARCH_MSM */
172
/* function declarations */

/* PCI core probe/remove entry points */
static int __devinit
dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit
dhdpcie_pci_remove(struct pci_dev *pdev);
/* per-device bring-up and the shared-interrupt top half */
static int dhdpcie_init(struct pci_dev *pdev);
static irqreturn_t dhdpcie_isr(int irq, void *arg);
/* OS Routine functions for PCI suspend/resume */

static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state);
static int dhdpcie_resume_host_dev(dhd_bus_t *bus);
static int dhdpcie_suspend_host_dev(dhd_bus_t *bus);
static int dhdpcie_resume_dev(struct pci_dev *dev);
static int dhdpcie_suspend_dev(struct pci_dev *dev);
#ifdef DHD_PCIE_RUNTIMEPM
/* dev_pm_ops callbacks, used when runtime PM support is compiled in */
static int dhdpcie_pm_suspend(struct device *dev);
static int dhdpcie_pm_prepare(struct device *dev);
static int dhdpcie_pm_resume(struct device *dev);
static void dhdpcie_pm_complete(struct device *dev);
#else
/* legacy pci_driver suspend/resume callbacks */
static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state);
static int dhdpcie_pci_resume(struct pci_dev *dev);
#endif /* DHD_PCIE_RUNTIMEPM */
196
197 static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = {
198 { vendor: 0x14e4,
199 device: PCI_ANY_ID,
200 subvendor: PCI_ANY_ID,
201 subdevice: PCI_ANY_ID,
202 class: PCI_CLASS_NETWORK_OTHER << 8,
203 class_mask: 0xffff00,
204 driver_data: 0,
205 },
206 { 0, 0, 0, 0, 0, 0, 0}
207 };
208 MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid);
209
/* Power Management Hooks */
#ifdef DHD_PCIE_RUNTIMEPM
/* dev_pm_ops wired into dhdpcie_driver.driver.pm below; prepare/complete
 * bracket the suspend/resume pair so runtime PM can be frozen for the
 * duration of a system sleep transition.
 */
static const struct dev_pm_ops dhd_pcie_pm_ops = {
	.prepare = dhdpcie_pm_prepare,
	.suspend = dhdpcie_pm_suspend,
	.resume = dhdpcie_pm_resume,
	.complete = dhdpcie_pm_complete,
};
#endif /* DHD_PCIE_RUNTIMEPM */
219
/* pci_driver registration record (old GCC label-style initializers are
 * kept as-is here because the field set varies with the kernel version
 * conditionals below).
 */
static struct pci_driver dhdpcie_driver = {
	node: {&dhdpcie_driver.node, &dhdpcie_driver.node}, /* self-linked list head */
	name: "pcieh",
	id_table: dhdpcie_pci_devid,
	probe: dhdpcie_pci_probe,
	remove: dhdpcie_pci_remove,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
	save_state: NULL,
#endif
#ifdef DHD_PCIE_RUNTIMEPM
	.driver.pm = &dhd_pcie_pm_ops, /* system PM via dev_pm_ops */
#else
	suspend: dhdpcie_pci_suspend, /* legacy PCI PM callbacks */
	resume: dhdpcie_pci_resume,
#endif /* DHD_PCIE_RUNTIMEPM */
};

/* TRUE once dhdpcie_init() has completed for a probed device */
int dhdpcie_init_succeeded = FALSE;
238
239 #ifdef USE_SMMU_ARCH_MSM
dhdpcie_smmu_init(struct pci_dev * pdev,void * smmu_cxt)240 static int dhdpcie_smmu_init(struct pci_dev *pdev, void *smmu_cxt)
241 {
242 struct dma_iommu_mapping *mapping;
243 struct device_node *root_node = NULL;
244 dhdpcie_smmu_info_t *smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt;
245 int smmu_iova_address[2];
246 char *wlan_node = "android,bcmdhd_wlan";
247 char *wlan_smmu_node = "wlan-smmu-iova-address";
248 int atomic_ctx = 1;
249 int s1_bypass = 1;
250 int ret = 0;
251
252 DHD_ERROR(("%s: SMMU initialize\n", __FUNCTION__));
253
254 root_node = of_find_compatible_node(NULL, NULL, wlan_node);
255 if (!root_node) {
256 WARN(1, "failed to get device node of BRCM WLAN\n");
257 return -ENODEV;
258 }
259
260 if (of_property_read_u32_array(root_node, wlan_smmu_node,
261 smmu_iova_address, 2) == 0) {
262 DHD_ERROR(("%s : get SMMU start address 0x%x, size 0x%x\n",
263 __FUNCTION__, smmu_iova_address[0], smmu_iova_address[1]));
264 smmu_info->smmu_iova_start = smmu_iova_address[0];
265 smmu_info->smmu_iova_len = smmu_iova_address[1];
266 } else {
267 printf("%s : can't get smmu iova address property\n",
268 __FUNCTION__);
269 return -ENODEV;
270 }
271
272 if (smmu_info->smmu_iova_len <= 0) {
273 DHD_ERROR(("%s: Invalid smmu iova len %d\n",
274 __FUNCTION__, (int)smmu_info->smmu_iova_len));
275 return -EINVAL;
276 }
277
278 DHD_ERROR(("%s : SMMU init start\n", __FUNCTION__));
279 mapping = arm_iommu_create_mapping(&platform_bus_type,
280 smmu_info->smmu_iova_start, smmu_info->smmu_iova_len);
281 if (IS_ERR(mapping)) {
282 DHD_ERROR(("%s: create mapping failed, err = %d\n",
283 __FUNCTION__, ret));
284 ret = PTR_ERR(mapping);
285 goto map_fail;
286 }
287
288 ret = iommu_domain_set_attr(mapping->domain,
289 DOMAIN_ATTR_ATOMIC, &atomic_ctx);
290 if (ret) {
291 DHD_ERROR(("%s: set atomic_ctx attribute failed, err = %d\n",
292 __FUNCTION__, ret));
293 goto set_attr_fail;
294 }
295
296 ret = iommu_domain_set_attr(mapping->domain,
297 DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
298 if (ret < 0) {
299 DHD_ERROR(("%s: set s1_bypass attribute failed, err = %d\n",
300 __FUNCTION__, ret));
301 goto set_attr_fail;
302 }
303
304 ret = arm_iommu_attach_device(&pdev->dev, mapping);
305 if (ret) {
306 DHD_ERROR(("%s: attach device failed, err = %d\n",
307 __FUNCTION__, ret));
308 goto attach_fail;
309 }
310
311 smmu_info->smmu_mapping = mapping;
312
313 return ret;
314
315 attach_fail:
316 set_attr_fail:
317 arm_iommu_release_mapping(mapping);
318 map_fail:
319 return ret;
320 }
321
dhdpcie_smmu_remove(struct pci_dev * pdev,void * smmu_cxt)322 static void dhdpcie_smmu_remove(struct pci_dev *pdev, void *smmu_cxt)
323 {
324 dhdpcie_smmu_info_t *smmu_info;
325
326 if (!smmu_cxt) {
327 return;
328 }
329
330 smmu_info = (dhdpcie_smmu_info_t *)smmu_cxt;
331 if (smmu_info->smmu_mapping) {
332 arm_iommu_detach_device(&pdev->dev);
333 arm_iommu_release_mapping(smmu_info->smmu_mapping);
334 smmu_info->smmu_mapping = NULL;
335 }
336 }
337 #endif /* USE_SMMU_ARCH_MSM */
338
339 #ifdef DHD_PCIE_RUNTIMEPM
dhdpcie_pm_suspend(struct device * dev)340 static int dhdpcie_pm_suspend(struct device *dev)
341 {
342 int ret = 0;
343 struct pci_dev *pdev = to_pci_dev(dev);
344 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
345 dhd_bus_t *bus = NULL;
346 unsigned long flags;
347
348 if (pch) {
349 bus = pch->bus;
350 }
351 if (!bus) {
352 return ret;
353 }
354
355 DHD_GENERAL_LOCK(bus->dhd, flags);
356 if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
357 DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n",
358 __FUNCTION__, bus->dhd->dhd_bus_busy_state));
359 DHD_GENERAL_UNLOCK(bus->dhd, flags);
360 return -EBUSY;
361 }
362 DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
363 DHD_GENERAL_UNLOCK(bus->dhd, flags);
364
365 if (!bus->dhd->dongle_reset)
366 ret = dhdpcie_set_suspend_resume(bus, TRUE);
367
368 DHD_GENERAL_LOCK(bus->dhd, flags);
369 DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
370 dhd_os_busbusy_wake(bus->dhd);
371 DHD_GENERAL_UNLOCK(bus->dhd, flags);
372
373 return ret;
374
375 }
376
dhdpcie_pm_prepare(struct device * dev)377 static int dhdpcie_pm_prepare(struct device *dev)
378 {
379 struct pci_dev *pdev = to_pci_dev(dev);
380 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
381 dhd_bus_t *bus = NULL;
382
383 if (pch) {
384 bus = pch->bus;
385 DHD_DISABLE_RUNTIME_PM(bus->dhd);
386 }
387
388 bus->chk_pm = TRUE;
389 return 0;
390 }
391
dhdpcie_pm_resume(struct device * dev)392 static int dhdpcie_pm_resume(struct device *dev)
393 {
394 int ret = 0;
395 struct pci_dev *pdev = to_pci_dev(dev);
396 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
397 dhd_bus_t *bus = NULL;
398 unsigned long flags;
399
400 if (pch) {
401 bus = pch->bus;
402 }
403 if (!bus) {
404 return ret;
405 }
406
407 DHD_GENERAL_LOCK(bus->dhd, flags);
408 DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
409 DHD_GENERAL_UNLOCK(bus->dhd, flags);
410
411 if (!bus->dhd->dongle_reset) {
412 ret = dhdpcie_set_suspend_resume(bus, FALSE);
413 bus->chk_pm = FALSE;
414 }
415
416 DHD_GENERAL_LOCK(bus->dhd, flags);
417 DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
418 dhd_os_busbusy_wake(bus->dhd);
419 DHD_GENERAL_UNLOCK(bus->dhd, flags);
420
421 return ret;
422 }
423
dhdpcie_pm_complete(struct device * dev)424 static void dhdpcie_pm_complete(struct device *dev)
425 {
426 struct pci_dev *pdev = to_pci_dev(dev);
427 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
428 dhd_bus_t *bus = NULL;
429
430 if (pch) {
431 bus = pch->bus;
432 DHD_ENABLE_RUNTIME_PM(bus->dhd);
433 }
434
435 return;
436 }
437 #else
/* Legacy pci_driver .suspend callback (non-runtime-PM build). Refuses
 * with -EBUSY while the bus is busy; otherwise runs the common suspend
 * path. The pm_message_t argument is unused.
 */
static int dhdpcie_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	dhdpcie_info_t *pch = pci_get_drvdata(pdev);
	dhd_bus_t *bus = pch ? pch->bus : NULL;
	unsigned long flags;
	int rc = 0;

	if (bus == NULL) {
		return 0;
	}

	BCM_REFERENCE(state);

	DHD_GENERAL_LOCK(bus->dhd, flags);
	if (!DHD_BUS_BUSY_CHECK_IDLE(bus->dhd)) {
		DHD_ERROR(("%s: Bus not IDLE!! dhd_bus_busy_state = 0x%x\n",
			__FUNCTION__, bus->dhd->dhd_bus_busy_state));
		DHD_GENERAL_UNLOCK(bus->dhd, flags);
		return -EBUSY;
	}
	DHD_BUS_BUSY_SET_SUSPEND_IN_PROGRESS(bus->dhd);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	/* skip the bus suspend while the dongle is held in reset */
	if (!bus->dhd->dongle_reset)
		rc = dhdpcie_set_suspend_resume(bus, TRUE);

	DHD_GENERAL_LOCK(bus->dhd, flags);
	DHD_BUS_BUSY_CLEAR_SUSPEND_IN_PROGRESS(bus->dhd);
	dhd_os_busbusy_wake(bus->dhd);
	DHD_GENERAL_UNLOCK(bus->dhd, flags);

	return rc;
}
474
dhdpcie_pci_resume(struct pci_dev * pdev)475 static int dhdpcie_pci_resume(struct pci_dev *pdev)
476 {
477 int ret = 0;
478 dhdpcie_info_t *pch = pci_get_drvdata(pdev);
479 dhd_bus_t *bus = NULL;
480 unsigned long flags;
481
482 if (pch) {
483 bus = pch->bus;
484 }
485 if (!bus) {
486 return ret;
487 }
488
489 DHD_GENERAL_LOCK(bus->dhd, flags);
490 DHD_BUS_BUSY_SET_RESUME_IN_PROGRESS(bus->dhd);
491 DHD_GENERAL_UNLOCK(bus->dhd, flags);
492
493 if (!bus->dhd->dongle_reset)
494 ret = dhdpcie_set_suspend_resume(bus, FALSE);
495
496 DHD_GENERAL_LOCK(bus->dhd, flags);
497 DHD_BUS_BUSY_CLEAR_RESUME_IN_PROGRESS(bus->dhd);
498 dhd_os_busbusy_wake(bus->dhd);
499 DHD_GENERAL_UNLOCK(bus->dhd, flags);
500
501 return ret;
502 }
503
504 #endif /* DHD_PCIE_RUNTIMEPM */
505
/* Common suspend/resume dispatcher.
 * state == TRUE requests suspend, FALSE requests resume. While firmware
 * is not loaded (bus down) only the PCI-level suspend/resume is done;
 * otherwise the full bus suspend path runs. With runtime PM compiled in,
 * a held wakelock vetoes suspend and pm_lock serializes the transition.
 */
static int dhdpcie_set_suspend_resume(dhd_bus_t *bus, bool state)
{
	int rc;

	ASSERT(bus && !bus->dhd->dongle_reset);

#ifdef DHD_PCIE_RUNTIMEPM
	/* if wakelock is held during suspend, return failed */
	if (state == TRUE && dhd_os_check_wakelock_all(bus->dhd)) {
		return -EBUSY;
	}
	mutex_lock(&bus->pm_lock);
#endif /* DHD_PCIE_RUNTIMEPM */

	if (bus->dhd->busstate == DHD_BUS_DOWN) {
		/* firmware not loaded: PCI-level suspend/resume only */
		rc = dhdpcie_pci_suspend_resume(bus, state);
	} else {
		rc = dhdpcie_bus_suspend(bus, state);
	}

#ifdef DHD_PCIE_RUNTIMEPM
	mutex_unlock(&bus->pm_lock);
#endif /* DHD_PCIE_RUNTIMEPM */

	return rc;
}
538
539 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
540 extern void dhd_dpc_tasklet_kill(dhd_pub_t *dhdp);
541 #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
542
/* Put the WLAN PCIe function into D3hot: save config space (caching it in
 * pch->state on >= 3.0 kernels for dhdpcie_resume_dev), arm PME, disable
 * the device and drop the power state. Returns BCME_ERROR if the link is
 * already down, else the pci_set_power_state() result.
 */
static int dhdpcie_suspend_dev(struct pci_dev *dev)
{
	int ret;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	dhdpcie_info_t *pch = pci_get_drvdata(dev);
	dhd_bus_t *bus = pch->bus;

	/* config-space access is pointless once the link has gone down */
	if (bus->is_linkdown) {
		DHD_ERROR(("%s: PCIe link is down\n", __FUNCTION__));
		return BCME_ERROR;
	}
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
	DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__));
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	/* make sure the DPC tasklet is not running while the bus sleeps */
	dhd_dpc_tasklet_kill(bus->dhd);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
	pci_save_state(dev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	/* keep our own copy; the PCI core's saved state is consumed at resume */
	pch->state = pci_store_saved_state(dev);
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
	pci_enable_wake(dev, PCI_D0, TRUE);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
	if (pci_is_enabled(dev))
#endif
		pci_disable_device(dev);

	ret = pci_set_power_state(dev, PCI_D3hot);
	if (ret) {
		DHD_ERROR(("%s: pci_set_power_state error %d\n",
			__FUNCTION__, ret));
	}
	// dev->state_saved = FALSE;
	return ret;
}
577
578 #ifdef DHD_WAKE_STATUS
/* Return the number of host wake-ups accounted so far for this device
 * (accumulated by bcmpcie_set_get_wake()).
 */
int bcmpcie_get_total_wake(struct dhd_bus *bus)
{
	dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);

	return pch->total_wake_count;
}
585
bcmpcie_set_get_wake(struct dhd_bus * bus,int flag)586 int bcmpcie_set_get_wake(struct dhd_bus *bus, int flag)
587 {
588 dhdpcie_info_t *pch = pci_get_drvdata(bus->dev);
589 unsigned long flags;
590 int ret;
591
592 spin_lock_irqsave(&pch->pcie_lock, flags);
593
594 ret = pch->pkt_wake;
595 pch->total_wake_count += flag;
596 pch->pkt_wake = flag;
597
598 spin_unlock_irqrestore(&pch->pcie_lock, flags);
599 return ret;
600 }
601 #endif /* DHD_WAKE_STATUS */
602
dhdpcie_resume_dev(struct pci_dev * dev)603 static int dhdpcie_resume_dev(struct pci_dev *dev)
604 {
605 int err = 0;
606 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
607 dhdpcie_info_t *pch = pci_get_drvdata(dev);
608 pci_load_and_free_saved_state(dev, &pch->state);
609 #endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
610 DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__));
611 // dev->state_saved = TRUE;
612 pci_restore_state(dev);
613 err = pci_enable_device(dev);
614 if (err) {
615 printf("%s:pci_enable_device error %d \n", __FUNCTION__, err);
616 goto out;
617 }
618 pci_set_master(dev);
619 err = pci_set_power_state(dev, PCI_D0);
620 if (err) {
621 printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err);
622 goto out;
623 }
624
625 out:
626 return err;
627 }
628
/* Resume the host-side root complex / PCIe clock, using whichever
 * platform hook is compiled in (Exynos, MSM or Tegra). On failure the
 * link is declared down so later accesses are refused.
 */
static int dhdpcie_resume_host_dev(dhd_bus_t *bus)
{
	int bcmerror = 0;
#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
	bcmerror = exynos_pcie_pm_resume(SAMSUNG_PCIE_CH_NUM);
#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */
#ifdef CONFIG_ARCH_MSM
	bcmerror = dhdpcie_start_host_pcieclock(bus);
#endif /* CONFIG_ARCH_MSM */
#ifdef CONFIG_ARCH_TEGRA
	bcmerror = tegra_pcie_pm_resume();
#endif /* CONFIG_ARCH_TEGRA */
	if (bcmerror < 0) {
		DHD_ERROR(("%s: PCIe RC resume failed!!! (%d)\n",
			__FUNCTION__, bcmerror));
		bus->is_linkdown = 1; /* poison the link; recovery path takes over */
#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
		bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
#endif /* SUPPORT_LINKDOWN_RECOVERY */
	}

	return bcmerror;
}
654
/* Suspend the host-side root complex / PCIe clock via the platform hook
 * compiled in (Exynos, MSM or Tegra). On Exynos the RC config space is
 * saved first when the RC handle is available.
 */
static int dhdpcie_suspend_host_dev(dhd_bus_t *bus)
{
	int bcmerror = 0;
#ifdef USE_EXYNOS_PCIE_RC_PMPATCH
	if (bus->rc_dev) {
		pci_save_state(bus->rc_dev);
	} else {
		DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
			__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
	}
	exynos_pcie_pm_suspend(SAMSUNG_PCIE_CH_NUM);
#endif /* USE_EXYNOS_PCIE_RC_PMPATCH */
#ifdef CONFIG_ARCH_MSM
	bcmerror = dhdpcie_stop_host_pcieclock(bus);
#endif /* CONFIG_ARCH_MSM */
#ifdef CONFIG_ARCH_TEGRA
	bcmerror = tegra_pcie_pm_suspend();
#endif /* CONFIG_ARCH_TEGRA */
	return bcmerror;
}
675
676 #if defined(PCIE_RC_VENDOR_ID) && defined(PCIE_RC_DEVICE_ID)
/* Read a dword from the root complex's config space at @offset.
 * Returns 0xffffffff when the RC handle is unavailable. The result is
 * always logged via DHD_ERROR for debugging.
 */
uint32
dhdpcie_rc_config_read(dhd_bus_t *bus, uint offset)
{
	uint val = -1; /* Initialise to 0xffffffff */
	if (bus->rc_dev) {
		pci_read_config_dword(bus->rc_dev, offset, &val);
		OSL_DELAY(100); /* brief settle delay after the RC access */
	} else {
		DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
			__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
	}
	DHD_ERROR(("%s: RC %x:%x offset 0x%x val 0x%x\n",
		__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, offset, val));
	return (val);
}
692
/*
 * Reads / writes the value of a capability register
 * from the given CAP_ID section of the PCI Root Port.
 *
 * Arguments
 * @bus	current dhd_bus_t pointer
 * @cap	Capability or Extended Capability ID to get
 * @offset	offset of the register to read
 * @is_ext	TRUE if @cap is given for an Extended Capability
 * @is_write	set to TRUE to indicate a write
 * @val	value to write
 *
 * Return Value
 * Returns 0xffffffff on error
 * On write success returns BCME_OK (0)
 * On read success returns the value of the register requested
 * Note: the caller should ensure valid capability and Ext. Capability IDs.
 */
711
uint32
dhdpcie_rc_access_cap(dhd_bus_t *bus, int cap, uint offset, bool is_ext, bool is_write,
	uint32 writeval)
{
	int cap_ptr = 0;
	uint32 ret = -1; /* 0xffffffff: error sentinel per the contract above */
	uint32 readval;

	if (!(bus->rc_dev)) {
		DHD_ERROR(("%s: RC %x:%x handle is NULL\n",
			__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID));
		return ret;
	}

	/* Find Capability offset */
	if (is_ext) {
		/* removing max EXT_CAP_ID check as
		 * linux kernel definition's max value is not updated yet as per spec
		 */
		cap_ptr = pci_find_ext_capability(bus->rc_dev, cap);

	} else {
		/* removing max PCI_CAP_ID_MAX check as
		 * previous kernel versions don't have this definition
		 */
		cap_ptr = pci_find_capability(bus->rc_dev, cap);
	}

	/* Return if capability with given ID not found */
	if (cap_ptr == 0) {
		DHD_ERROR(("%s: RC %x:%x PCI Cap(0x%02x) not supported.\n",
			__FUNCTION__, PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, cap));
		return BCME_ERROR;
	}

	if (is_write) {
		/* write path: on success report BCME_OK rather than the value */
		ret = pci_write_config_dword(bus->rc_dev, (cap_ptr + offset), writeval);
		if (ret) {
			DHD_ERROR(("%s: pci_write_config_dword failed. cap=%d offset=%d\n",
				__FUNCTION__, cap, offset));
			return BCME_ERROR;
		}
		ret = BCME_OK;

	} else {

		ret = pci_read_config_dword(bus->rc_dev, (cap_ptr + offset), &readval);

		if (ret) {
			DHD_ERROR(("%s: pci_read_config_dword failed. cap=%d offset=%d\n",
				__FUNCTION__, cap, offset));
			return BCME_ERROR;
		}
		ret = readval; /* read path: the register value itself */
	}

	return ret;
}
770
/* API wrapper to read Root Port link capability
 * Returns 2 = GEN2 1 = GEN1 BCME_ERR on linkcap not found
 */

uint32 dhd_debug_get_rc_linkcap(dhd_bus_t *bus)
{
	uint32 linkcap = -1;
	linkcap = dhdpcie_rc_access_cap(bus, PCIE_CAP_ID_EXP,
		PCIE_CAP_LINKCAP_OFFSET, FALSE, FALSE, 0);
	/* keep only the link-speed field of the Link Capabilities register */
	linkcap &= PCIE_CAP_LINKCAP_LNKSPEED_MASK;
	return linkcap;
}
783 #endif
784
/* PCI-level suspend/resume of both the endpoint and the host RC.
 * state == TRUE: suspend (device first, then host clock/RC);
 * state == FALSE: resume (host first, then device). Without OOB host
 * wake, PME is armed/disarmed around the transition. On resume, a dead
 * link escalates to a HANG message toward the upper layers.
 */
int dhdpcie_pci_suspend_resume(dhd_bus_t *bus, bool state)
{
	int rc;

	struct pci_dev *dev = bus->dev;

	if (state) {
		#ifndef BCMPCIE_OOB_HOST_WAKE
		dhdpcie_pme_active(bus->osh, state);
		#endif /* !BCMPCIE_OOB_HOST_WAKE */
		rc = dhdpcie_suspend_dev(dev);
		if (!rc) {
			/* only power down the host side if the EP went down cleanly */
			dhdpcie_suspend_host_dev(bus);
		}
	} else {
		dhdpcie_resume_host_dev(bus);
		rc = dhdpcie_resume_dev(dev);
		#ifndef BCMPCIE_OOB_HOST_WAKE
		dhdpcie_pme_active(bus->osh, state);
		#endif /* !BCMPCIE_OOB_HOST_WAKE */
		#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
		#if defined(DHD_HANG_SEND_UP_TEST)
		if (bus->is_linkdown ||
			bus->dhd->req_hang_type == HANG_REASON_PCIE_RC_LINK_UP_FAIL)
		#else /* DHD_HANG_SEND_UP_TEST */
		if (bus->is_linkdown)
		#endif /* DHD_HANG_SEND_UP_TEST */
		{
			bus->dhd->hang_reason = HANG_REASON_PCIE_RC_LINK_UP_FAIL;
			dhd_os_send_hang_message(bus->dhd);
		}
		#endif
	}
	return rc;
}
820
821 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
/* bus_for_each_dev() callback: count Broadcom (0x14e4) PCI devices.
 * @data points at an int counter which is incremented per match; a match
 * already bound to a different driver is logged but still counted.
 * Always returns 0 so the iteration continues over every device.
 */
static int dhdpcie_device_scan(struct device *dev, void *data)
{
	struct pci_dev *pcidev;
	int *cnt = data;

#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	pcidev = container_of(dev, struct pci_dev, dev);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	if (pcidev->vendor != 0x14e4)
		return 0;

	DHD_INFO(("Found Broadcom PCI device 0x%04x\n", pcidev->device));
	*cnt += 1;
	if (pcidev->driver && strcmp(pcidev->driver->name, dhdpcie_driver.name))
		DHD_ERROR(("Broadcom PCI Device 0x%04x has allocated with driver %s\n",
			pcidev->device, pcidev->driver->name));

	return 0;
}
846 #endif /* LINUX_VERSION >= 2.6.0 */
847
/* Register the PCIe driver with the PCI core.
 * Returns 0 on success; BCME_ERROR (after unregistering) when no
 * Broadcom device was enumerated or dhdpcie_init() failed during probe.
 */
int
dhdpcie_bus_register(void)
{
	int error = 0;


#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
	if (!(error = pci_module_init(&dhdpcie_driver)))
		return 0;

	DHD_ERROR(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error));
#else
	if (!(error = pci_register_driver(&dhdpcie_driver))) {
		/* 'error' is reused as the device counter incremented by
		 * dhdpcie_device_scan(); zero afterwards means no Broadcom
		 * device was found on the bus.
		 */
		bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error, dhdpcie_device_scan);
		if (!error) {
			DHD_ERROR(("No Broadcom PCI device enumerated!\n"));
		} else if (!dhdpcie_init_succeeded) {
			DHD_ERROR(("%s: dhdpcie initialize failed.\n", __FUNCTION__));
		} else {
			return 0;
		}

		/* either no device or failed init: undo the registration */
		pci_unregister_driver(&dhdpcie_driver);
		error = BCME_ERROR;
	}
#endif /* LINUX_VERSION < 2.6.0 */

	return error;
}
877
878
/* Unregister the PCIe driver; the PCI core invokes dhdpcie_pci_remove()
 * for each bound device as a side effect.
 */
void
dhdpcie_bus_unregister(void)
{
	pci_unregister_driver(&dhdpcie_driver);
}
884
/* PCI probe callback: verify the chip is supported and run the per-device
 * bring-up (dhdpcie_init). Returns 0 on success, -ENODEV on chip mismatch
 * or enumeration failure. Serialized by DHD_MUTEX against remove.
 */
int __devinit
dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0;
	DHD_MUTEX_LOCK();

	if (dhdpcie_chipmatch (pdev->vendor, pdev->device)) {
		DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__));
		err = -ENODEV;
		goto exit;
	}
	printf("PCI_PROBE: bus %X, slot %X,vendor %X, device %X"
		"(good PCI location)\n", pdev->bus->number,
		PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device);

	if (dhdpcie_init (pdev)) {
		DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__));
		err = -ENODEV;
		goto exit;
	}

#ifdef BCMPCIE_DISABLE_ASYNC_SUSPEND
	/* disable async suspend */
	device_disable_async_suspend(&pdev->dev);
#endif /* BCMPCIE_DISABLE_ASYNC_SUSPEND */

	DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__));

exit:
	DHD_MUTEX_UNLOCK();
	return err;
}
917
/* Free the per-device info structure. When the firmware is not loaded at
 * driver load, also return the attach-time config snapshot to the PCI
 * core and release it. NULL @pch is a no-op. Always returns 0.
 */
int
dhdpcie_detach(dhdpcie_info_t *pch)
{
	if (pch) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
		if (!dhd_download_fw_on_driverload) {
			pci_load_and_free_saved_state(pch->dev, &pch->default_state);
		}
#endif /* OEM_ANDROID && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
		MFREE(pch->osh, pch, sizeof(dhdpcie_info_t));
	}
	return 0;
}
931
932
933 void __devexit
dhdpcie_pci_remove(struct pci_dev * pdev)934 dhdpcie_pci_remove(struct pci_dev *pdev)
935 {
936 osl_t *osh = NULL;
937 dhdpcie_info_t *pch = NULL;
938 dhd_bus_t *bus = NULL;
939
940 DHD_TRACE(("%s Enter\n", __FUNCTION__));
941
942 DHD_MUTEX_LOCK();
943
944 pch = pci_get_drvdata(pdev);
945 bus = pch->bus;
946 osh = pch->osh;
947
948 #ifdef SUPPORT_LINKDOWN_RECOVERY
949 if (bus) {
950 #ifdef CONFIG_ARCH_MSM
951 msm_pcie_deregister_event(&bus->pcie_event);
952 #endif /* CONFIG_ARCH_MSM */
953 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
954 #ifdef CONFIG_SOC_EXYNOS8890
955 exynos_pcie_deregister_event(&bus->pcie_event);
956 #endif /* CONFIG_SOC_EXYNOS8890 */
957 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
958 }
959 #endif /* SUPPORT_LINKDOWN_RECOVERY */
960
961 bus->rc_dev = NULL;
962
963 dhdpcie_bus_release(bus);
964 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
965 if (pci_is_enabled(pdev))
966 #endif
967 pci_disable_device(pdev);
968 #ifdef BCMPCIE_OOB_HOST_WAKE
969 /* pcie os info detach */
970 MFREE(osh, pch->os_cxt, sizeof(dhdpcie_os_info_t));
971 #endif /* BCMPCIE_OOB_HOST_WAKE */
972 #ifdef USE_SMMU_ARCH_MSM
973 /* smmu info detach */
974 dhdpcie_smmu_remove(pdev, pch->smmu_cxt);
975 MFREE(osh, pch->smmu_cxt, sizeof(dhdpcie_smmu_info_t));
976 #endif /* USE_SMMU_ARCH_MSM */
977 /* pcie info detach */
978 dhdpcie_detach(pch);
979 /* osl detach */
980 osl_detach(osh);
981
982 #if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \
983 defined(CONFIG_ARCH_APQ8084)
984 brcm_pcie_wake.wake_irq = NULL;
985 brcm_pcie_wake.data = NULL;
986 #endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMR_HW2 && CONFIG_ARCH_APQ8084 */
987
988 dhdpcie_init_succeeded = FALSE;
989
990 DHD_MUTEX_UNLOCK();
991
992 DHD_TRACE(("%s Exit\n", __FUNCTION__));
993
994 return;
995 }
996
/* Register the Linux irq for this device (shared line; MSI first when
 * DHD_USE_MSI is set, falling back to INTx on failure) and make sure it
 * is enabled. Returns 0 on success, -1 when request_irq() fails.
 */
int
dhdpcie_request_irq(dhdpcie_info_t *dhdpcie_info)
{
	dhd_bus_t *bus = dhdpcie_info->bus;
	struct pci_dev *pdev = dhdpcie_info->bus->dev;
	int err = 0;

	if (!bus->irq_registered) {
		snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname),
			"dhdpcie:%s", pci_name(pdev));
#ifdef DHD_USE_MSI
		printf("%s: MSI enabled\n", __FUNCTION__);
		err = pci_enable_msi(pdev);
		if (err < 0) {
			DHD_ERROR(("%s: pci_enable_msi() failed, %d, fall back to INTx\n", __FUNCTION__, err));
		}
#endif /* DHD_USE_MSI */
		err = request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED,
			dhdpcie_info->pciname, bus);
		if (err) {
			DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
#ifdef DHD_USE_MSI
			/* undo the MSI enable so the vector is not leaked */
			pci_disable_msi(pdev);
#endif /* DHD_USE_MSI */
			return -1;
		} else {
			bus->irq_registered = TRUE;
		}
	} else {
		DHD_ERROR(("%s: PCI IRQ is already registered\n", __FUNCTION__));
	}

	if (!dhdpcie_irq_enabled(bus)) {
		DHD_ERROR(("%s: PCIe IRQ was disabled, so, enabled it again\n", __FUNCTION__));
		dhdpcie_enable_irq(bus);
	}

	DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname));


	return 0; /* SUCCESS */
}
1042
1043 /**
1044 * dhdpcie_get_pcieirq - return pcie irq number to linux-dhd
1045 */
1046 int
dhdpcie_get_pcieirq(struct dhd_bus * bus,unsigned int * irq)1047 dhdpcie_get_pcieirq(struct dhd_bus *bus, unsigned int *irq)
1048 {
1049 struct pci_dev *pdev = bus->dev;
1050
1051 if (!pdev) {
1052 DHD_ERROR(("%s : bus->dev is NULL\n", __FUNCTION__));
1053 return -ENODEV;
1054 }
1055
1056 *irq = pdev->irq;
1057
1058 return 0; /* SUCCESS */
1059 }
1060
1061 #ifdef CONFIG_PHYS_ADDR_T_64BIT
1062 #define PRINTF_RESOURCE "0x%016llx"
1063 #else
1064 #define PRINTF_RESOURCE "0x%08x"
1065 #endif
1066
/*

Name: dhdpcie_get_resource

Parameters:

1: struct pci_dev *pdev -- pci device structure
2: pci_res -- structure containing pci configuration space values


Return value:

int - Status (TRUE or FALSE)

Description:
Access PCI configuration space, retrieve the PCI-allocated resources,
and record them in the resource structure.

*/
dhdpcie_get_resource(dhdpcie_info_t * dhdpcie_info)1085 int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info)
1086 {
1087 phys_addr_t bar0_addr, bar1_addr;
1088 ulong bar1_size;
1089 struct pci_dev *pdev = NULL;
1090 pdev = dhdpcie_info->dev;
1091 #ifdef EXYNOS_PCIE_MODULE_PATCH
1092 pci_restore_state(pdev);
1093 #endif /* EXYNOS_MODULE_PATCH */
1094 do {
1095 if (pci_enable_device(pdev)) {
1096 printf("%s: Cannot enable PCI device\n", __FUNCTION__);
1097 break;
1098 }
1099 pci_set_master(pdev);
1100 bar0_addr = pci_resource_start(pdev, 0); /* Bar-0 mapped address */
1101 bar1_addr = pci_resource_start(pdev, 2); /* Bar-1 mapped address */
1102
1103 /* read Bar-1 mapped memory range */
1104 bar1_size = pci_resource_len(pdev, 2);
1105
1106 if ((bar1_size == 0) || (bar1_addr == 0)) {
1107 printf("%s: BAR1 Not enabled for this device size(%ld),"
1108 " addr(0x"PRINTF_RESOURCE")\n",
1109 __FUNCTION__, bar1_size, bar1_addr);
1110 goto err;
1111 }
1112
1113 dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
1114 dhdpcie_info->tcm_size =
1115 (bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
1116 dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size);
1117
1118 if (!dhdpcie_info->regs || !dhdpcie_info->tcm) {
1119 DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__));
1120 break;
1121 }
1122
1123 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
1124 if (!dhd_download_fw_on_driverload) {
1125 /* Backup PCIe configuration so as to use Wi-Fi on/off process
1126 * in case of built in driver
1127 */
1128 pci_save_state(pdev);
1129 dhdpcie_info->default_state = pci_store_saved_state(pdev);
1130
1131 if (dhdpcie_info->default_state == NULL) {
1132 DHD_ERROR(("%s pci_store_saved_state returns NULL\n",
1133 __FUNCTION__));
1134 REG_UNMAP(dhdpcie_info->regs);
1135 REG_UNMAP(dhdpcie_info->tcm);
1136 pci_disable_device(pdev);
1137 break;
1138 }
1139 }
1140 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
1141
1142 #ifdef EXYNOS_PCIE_MODULE_PATCH
1143 pci_save_state(pdev);
1144 #endif /* EXYNOS_MODULE_PATCH */
1145
1146 DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
1147 __FUNCTION__, dhdpcie_info->regs, bar0_addr));
1148 DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
1149 __FUNCTION__, dhdpcie_info->tcm, bar1_addr));
1150
1151 return 0; /* SUCCESS */
1152 } while (0);
1153 err:
1154 return -1; /* FAILURE */
1155 }
1156
int dhdpcie_scan_resource(dhdpcie_info_t *dhdpcie_info)
{
	DHD_TRACE(("%s: ENTER\n", __FUNCTION__));

	/* Map the BAR regions and record them in dhdpcie_info. */
	if (dhdpcie_get_resource(dhdpcie_info) != 0) {
		DHD_ERROR(("%s: Failed to get PCI resources\n", __FUNCTION__));
		DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
		return -1; /* FAILURE */
	}

	DHD_TRACE(("%s:Exit - SUCCESS \n",
		__FUNCTION__));

	return 0; /* SUCCESS */
}
1180
1181 #ifdef SUPPORT_LINKDOWN_RECOVERY
1182 #if defined(CONFIG_ARCH_MSM) || (defined(EXYNOS_PCIE_LINKDOWN_RECOVERY) && \
1183 (defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895)))
void dhdpcie_linkdown_cb(struct_pcie_notify *noti)
{
	struct pci_dev *pdev = (struct pci_dev *)noti->user;
	dhdpcie_info_t *pch = NULL;
	dhd_bus_t *bus;
	dhd_pub_t *dhd;

	/* Guard-clause form: bail out as soon as any link is missing. */
	if (pdev == NULL) {
		return;
	}
	pch = pci_get_drvdata(pdev);
	if (pch == NULL) {
		return;
	}
	bus = pch->bus;
	if (bus == NULL) {
		return;
	}
	dhd = bus->dhd;
	if (dhd == NULL) {
		return;
	}

	/* PCIe link is gone: mark the bus down and push a HANG event up. */
	DHD_ERROR(("%s: Event HANG send up "
		"due to PCIe linkdown\n",
		__FUNCTION__));
#ifdef CONFIG_ARCH_MSM
	bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
	bus->is_linkdown = 1;
	DHD_OS_WAKE_LOCK(dhd);
	dhd->hang_reason = HANG_REASON_PCIE_LINK_DOWN;
	dhd_os_send_hang_message(dhd);

}
1212 #endif
1213 /* CONFIG_ARCH_MSM || (EXYNOS_PCIE_LINKDOWN_RECOVERY &&
1214 * (CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895))
1215 */
1216 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1217
dhdpcie_init(struct pci_dev * pdev)1218 int dhdpcie_init(struct pci_dev *pdev)
1219 {
1220
1221 osl_t *osh = NULL;
1222 dhd_bus_t *bus = NULL;
1223 dhdpcie_info_t *dhdpcie_info = NULL;
1224 wifi_adapter_info_t *adapter = NULL;
1225 #ifdef BCMPCIE_OOB_HOST_WAKE
1226 dhdpcie_os_info_t *dhdpcie_osinfo = NULL;
1227 #endif /* BCMPCIE_OOB_HOST_WAKE */
1228 #ifdef USE_SMMU_ARCH_MSM
1229 dhdpcie_smmu_info_t *dhdpcie_smmu_info = NULL;
1230 #endif /* USE_SMMU_ARCH_MSM */
1231
1232 do {
1233 /* osl attach */
1234 if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
1235 DHD_ERROR(("%s: osl_attach failed\n", __FUNCTION__));
1236 break;
1237 }
1238
1239 /* initialize static buffer */
1240 adapter = dhd_wifi_platform_get_adapter(PCI_BUS, pdev->bus->number,
1241 PCI_SLOT(pdev->devfn));
1242 if (adapter != NULL) {
1243 DHD_ERROR(("%s: found adapter info '%s'\n", __FUNCTION__, adapter->name));
1244 #ifdef BUS_POWER_RESTORE
1245 adapter->pci_dev = pdev;
1246 #endif
1247 } else
1248 DHD_ERROR(("%s: can't find adapter info for this chip\n", __FUNCTION__));
1249 osl_static_mem_init(osh, adapter);
1250
1251 /* Set ACP coherence flag */
1252 if (OSL_ACP_WAR_ENAB() || OSL_ARCH_IS_COHERENT())
1253 osl_flag_set(osh, OSL_ACP_COHERENCE);
1254
1255 /* allocate linux spcific pcie structure here */
1256 if (!(dhdpcie_info = MALLOC(osh, sizeof(dhdpcie_info_t)))) {
1257 DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
1258 break;
1259 }
1260 bzero(dhdpcie_info, sizeof(dhdpcie_info_t));
1261 dhdpcie_info->osh = osh;
1262 dhdpcie_info->dev = pdev;
1263
1264 #ifdef BCMPCIE_OOB_HOST_WAKE
1265 /* allocate OS speicific structure */
1266 dhdpcie_osinfo = MALLOC(osh, sizeof(dhdpcie_os_info_t));
1267 if (dhdpcie_osinfo == NULL) {
1268 DHD_ERROR(("%s: MALLOC of dhdpcie_os_info_t failed\n",
1269 __FUNCTION__));
1270 break;
1271 }
1272 bzero(dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
1273 dhdpcie_info->os_cxt = (void *)dhdpcie_osinfo;
1274
1275 /* Initialize host wake IRQ */
1276 spin_lock_init(&dhdpcie_osinfo->oob_irq_spinlock);
1277 /* Get customer specific host wake IRQ parametres: IRQ number as IRQ type */
1278 dhdpcie_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter,
1279 &dhdpcie_osinfo->oob_irq_flags);
1280 if (dhdpcie_osinfo->oob_irq_num < 0) {
1281 DHD_ERROR(("%s: Host OOB irq is not defined\n", __FUNCTION__));
1282 }
1283 #endif /* BCMPCIE_OOB_HOST_WAKE */
1284
1285 #ifdef USE_SMMU_ARCH_MSM
1286 /* allocate private structure for using SMMU */
1287 dhdpcie_smmu_info = MALLOC(osh, sizeof(dhdpcie_smmu_info_t));
1288 if (dhdpcie_smmu_info == NULL) {
1289 DHD_ERROR(("%s: MALLOC of dhdpcie_smmu_info_t failed\n",
1290 __FUNCTION__));
1291 break;
1292 }
1293 bzero(dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t));
1294 dhdpcie_info->smmu_cxt = (void *)dhdpcie_smmu_info;
1295
1296 /* Initialize smmu structure */
1297 if (dhdpcie_smmu_init(pdev, dhdpcie_info->smmu_cxt) < 0) {
1298 DHD_ERROR(("%s: Failed to initialize SMMU\n",
1299 __FUNCTION__));
1300 break;
1301 }
1302 #endif /* USE_SMMU_ARCH_MSM */
1303
1304 #ifdef DHD_WAKE_STATUS
1305 /* Initialize pcie_lock */
1306 spin_lock_init(&dhdpcie_info->pcie_lock);
1307 #endif /* DHD_WAKE_STATUS */
1308
1309 /* Find the PCI resources, verify the */
1310 /* vendor and device ID, map BAR regions and irq, update in structures */
1311 if (dhdpcie_scan_resource(dhdpcie_info)) {
1312 DHD_ERROR(("%s: dhd_Scan_PCI_Res failed\n", __FUNCTION__));
1313
1314 break;
1315 }
1316
1317 /* Bus initialization */
1318 bus = dhdpcie_bus_attach(osh, dhdpcie_info->regs, dhdpcie_info->tcm, pdev);
1319 if (!bus) {
1320 DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__));
1321 break;
1322 }
1323
1324 dhdpcie_info->bus = bus;
1325 bus->is_linkdown = 0;
1326
1327 /* Get RC Device Handle */
1328 #if defined(PCIE_RC_VENDOR_ID) && defined(PCIE_RC_DEVICE_ID)
1329 bus->rc_dev = pci_get_device(PCIE_RC_VENDOR_ID, PCIE_RC_DEVICE_ID, NULL);
1330 #else
1331 bus->rc_dev = NULL;
1332 #endif
1333
1334 #if defined(BCMPCIE_OOB_HOST_WAKE) && defined(CUSTOMER_HW2) && \
1335 defined(CONFIG_ARCH_APQ8084)
1336 brcm_pcie_wake.wake_irq = wlan_oob_irq;
1337 brcm_pcie_wake.data = bus;
1338 #endif /* BCMPCIE_OOB_HOST_WAKE && CUSTOMR_HW2 && CONFIG_ARCH_APQ8084 */
1339
1340 #ifdef DONGLE_ENABLE_ISOLATION
1341 bus->dhd->dongle_isolation = TRUE;
1342 #endif /* DONGLE_ENABLE_ISOLATION */
1343 #ifdef SUPPORT_LINKDOWN_RECOVERY
1344 #ifdef CONFIG_ARCH_MSM
1345 bus->pcie_event.events = MSM_PCIE_EVENT_LINKDOWN;
1346 bus->pcie_event.user = pdev;
1347 bus->pcie_event.mode = MSM_PCIE_TRIGGER_CALLBACK;
1348 bus->pcie_event.callback = dhdpcie_linkdown_cb;
1349 bus->pcie_event.options = MSM_PCIE_CONFIG_NO_RECOVERY;
1350 msm_pcie_register_event(&bus->pcie_event);
1351 bus->no_cfg_restore = 0;
1352 #endif /* CONFIG_ARCH_MSM */
1353 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
1354 #if defined(CONFIG_SOC_EXYNOS8890) || defined(CONFIG_SOC_EXYNOS8895)
1355 bus->pcie_event.events = EXYNOS_PCIE_EVENT_LINKDOWN;
1356 bus->pcie_event.user = pdev;
1357 bus->pcie_event.mode = EXYNOS_PCIE_TRIGGER_CALLBACK;
1358 bus->pcie_event.callback = dhdpcie_linkdown_cb;
1359 exynos_pcie_register_event(&bus->pcie_event);
1360 #endif /* CONFIG_SOC_EXYNOS8890 || CONFIG_SOC_EXYNOS8895 */
1361 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
1362 bus->read_shm_fail = FALSE;
1363 #endif /* SUPPORT_LINKDOWN_RECOVERY */
1364
1365 if (bus->intr) {
1366 /* Register interrupt callback, but mask it (not operational yet). */
1367 DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
1368 dhdpcie_bus_intr_disable(bus);
1369
1370 if (dhdpcie_request_irq(dhdpcie_info)) {
1371 DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
1372 break;
1373 }
1374 } else {
1375 bus->pollrate = 1;
1376 DHD_INFO(("%s: PCIe interrupt function is NOT registered "
1377 "due to polling mode\n", __FUNCTION__));
1378 }
1379
1380 #if defined(BCM_REQUEST_FW)
1381 if (dhd_bus_download_firmware(bus, osh, NULL, NULL) < 0) {
1382 DHD_ERROR(("%s: failed to download firmware\n", __FUNCTION__));
1383 }
1384 bus->nv_path = NULL;
1385 bus->fw_path = NULL;
1386 #endif /* BCM_REQUEST_FW */
1387
1388 /* set private data for pci_dev */
1389 pci_set_drvdata(pdev, dhdpcie_info);
1390
1391 if (dhd_download_fw_on_driverload) {
1392 if (dhd_bus_start(bus->dhd)) {
1393 DHD_ERROR(("%s: dhd_bud_start() failed\n", __FUNCTION__));
1394 if (!allow_delay_fwdl)
1395 break;
1396 }
1397 } else {
1398 /* Set ramdom MAC address during boot time */
1399 get_random_bytes(&bus->dhd->mac.octet[3], 3);
1400 /* Adding BRCM OUI */
1401 bus->dhd->mac.octet[0] = 0;
1402 bus->dhd->mac.octet[1] = 0x90;
1403 bus->dhd->mac.octet[2] = 0x4C;
1404 }
1405
1406 /* Attach to the OS network interface */
1407 DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__));
1408 if (dhd_register_if(bus->dhd, 0, TRUE)) {
1409 DHD_ERROR(("%s(): ERROR.. dhd_register_if() failed\n", __FUNCTION__));
1410 break;
1411 }
1412
1413 dhdpcie_init_succeeded = TRUE;
1414
1415 #if defined(MULTIPLE_SUPPLICANT)
1416 wl_android_post_init(); // terence 20120530: fix critical section in dhd_open and dhdsdio_probe
1417 #endif /* MULTIPLE_SUPPLICANT */
1418
1419 DHD_TRACE(("%s:Exit - SUCCESS \n", __FUNCTION__));
1420 return 0; /* return SUCCESS */
1421
1422 } while (0);
1423 /* reverse the initialization in order in case of error */
1424
1425 if (bus)
1426 dhdpcie_bus_release(bus);
1427
1428 #ifdef BCMPCIE_OOB_HOST_WAKE
1429 if (dhdpcie_osinfo) {
1430 MFREE(osh, dhdpcie_osinfo, sizeof(dhdpcie_os_info_t));
1431 }
1432 #endif /* BCMPCIE_OOB_HOST_WAKE */
1433
1434 #ifdef USE_SMMU_ARCH_MSM
1435 if (dhdpcie_smmu_info) {
1436 MFREE(osh, dhdpcie_smmu_info, sizeof(dhdpcie_smmu_info_t));
1437 dhdpcie_info->smmu_cxt = NULL;
1438 }
1439 #endif /* USE_SMMU_ARCH_MSM */
1440
1441 if (dhdpcie_info)
1442 dhdpcie_detach(dhdpcie_info);
1443 pci_disable_device(pdev);
1444 if (osh)
1445 osl_detach(osh);
1446
1447 dhdpcie_init_succeeded = FALSE;
1448
1449 DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
1450
1451 return -1; /* return FAILURE */
1452 }
1453
1454 /* Free Linux irq */
1455 void
dhdpcie_free_irq(dhd_bus_t * bus)1456 dhdpcie_free_irq(dhd_bus_t *bus)
1457 {
1458 struct pci_dev *pdev = NULL;
1459
1460 DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__));
1461 if (bus) {
1462 pdev = bus->dev;
1463 if (bus->irq_registered) {
1464 free_irq(pdev->irq, bus);
1465 bus->irq_registered = FALSE;
1466 #ifdef DHD_USE_MSI
1467 pci_disable_msi(pdev);
1468 #endif /* DHD_USE_MSI */
1469 } else {
1470 DHD_ERROR(("%s: PCIe IRQ is not registered\n", __FUNCTION__));
1471 }
1472 }
1473 DHD_TRACE(("%s: Exit\n", __FUNCTION__));
1474 return;
1475 }
1476
/*

Name: dhdpcie_isr

Parameters:

1: IN int irq -- interrupt vector
2: IN void *arg -- handle to private data structure

Return value:

Status (TRUE or FALSE)

Description:
Interrupt Service Routine: checks the status register, disables the
interrupt, and queues the DPC if mailbox interrupts are raised.
*/
1494
1495
1496 irqreturn_t
dhdpcie_isr(int irq,void * arg)1497 dhdpcie_isr(int irq, void *arg)
1498 {
1499 dhd_bus_t *bus = (dhd_bus_t*)arg;
1500 if (dhdpcie_bus_isr(bus))
1501 return TRUE;
1502 else
1503 return FALSE;
1504 }
1505
1506 int
dhdpcie_disable_irq_nosync(dhd_bus_t * bus)1507 dhdpcie_disable_irq_nosync(dhd_bus_t *bus)
1508 {
1509 struct pci_dev *dev;
1510 if ((bus == NULL) || (bus->dev == NULL)) {
1511 DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
1512 return BCME_ERROR;
1513 }
1514
1515 dev = bus->dev;
1516 disable_irq_nosync(dev->irq);
1517 return BCME_OK;
1518 }
1519
1520 int
dhdpcie_disable_irq(dhd_bus_t * bus)1521 dhdpcie_disable_irq(dhd_bus_t *bus)
1522 {
1523 struct pci_dev *dev;
1524 if ((bus == NULL) || (bus->dev == NULL)) {
1525 DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
1526 return BCME_ERROR;
1527 }
1528
1529 dev = bus->dev;
1530 disable_irq(dev->irq);
1531 return BCME_OK;
1532 }
1533
1534 int
dhdpcie_enable_irq(dhd_bus_t * bus)1535 dhdpcie_enable_irq(dhd_bus_t *bus)
1536 {
1537 struct pci_dev *dev;
1538 if ((bus == NULL) || (bus->dev == NULL)) {
1539 DHD_ERROR(("%s: bus or bus->dev is NULL\n", __FUNCTION__));
1540 return BCME_ERROR;
1541 }
1542
1543 dev = bus->dev;
1544 enable_irq(dev->irq);
1545 return BCME_OK;
1546 }
1547
bool
dhdpcie_irq_enabled(dhd_bus_t *bus)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
	/* irq_to_desc() returns the kernel's per-IRQ descriptor; its
	 * disable depth is zero only when the line is currently enabled.
	 */
	struct irq_desc *desc = irq_to_desc(bus->dev->irq);
	/* depth will be zero, if enabled */
	if (!desc->depth) {
		DHD_ERROR(("%s: depth:%d\n", __FUNCTION__, desc->depth));
	}
	return desc->depth ? FALSE : TRUE;
#else
	/* return TRUE by default as there is no support for lower versions */
	return TRUE;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) */
}
1563
/* Bring up the host-side PCIe link/clock.  Only does real work on MSM
 * platforms (via msm_pcie_pm_control); elsewhere it just returns 0.
 */
int
dhdpcie_start_host_pcieclock(dhd_bus_t *bus)
{
	int ret = 0;
#ifdef CONFIG_ARCH_MSM
#ifdef SUPPORT_LINKDOWN_RECOVERY
	int options = 0;
#endif /* SUPPORT_LINKDOWN_RECOVERY */
#endif /* CONFIG_ARCH_MSM */
	DHD_TRACE(("%s Enter:\n", __FUNCTION__));

	if (bus == NULL) {
		return BCME_ERROR;
	}

	if (bus->dev == NULL) {
		return BCME_ERROR;
	}

#ifdef CONFIG_ARCH_MSM
#ifdef SUPPORT_LINKDOWN_RECOVERY
	/* After a link-down the saved config space is stale: tell the RC
	 * driver not to restore it, then recover it explicitly below.
	 */
	if (bus->no_cfg_restore) {
		options = MSM_PCIE_CONFIG_NO_CFG_RESTORE;
	}
	ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
		bus->dev, NULL, options);
	if (bus->no_cfg_restore && !ret) {
		msm_pcie_recover_config(bus->dev);
		bus->no_cfg_restore = 0;
	}
#else
	ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
		bus->dev, NULL, 0);
#endif /* SUPPORT_LINKDOWN_RECOVERY */
	if (ret) {
		DHD_ERROR(("%s Failed to bring up PCIe link\n", __FUNCTION__));
		goto done;
	}

done:
#endif /* CONFIG_ARCH_MSM */
	DHD_TRACE(("%s Exit:\n", __FUNCTION__));
	return ret;
}
1608
/* Suspend / power down the host-side PCIe link.  Only does real work
 * on MSM platforms; elsewhere it just returns 0.
 */
int
dhdpcie_stop_host_pcieclock(dhd_bus_t *bus)
{
	int ret = 0;
#ifdef CONFIG_ARCH_MSM
#ifdef SUPPORT_LINKDOWN_RECOVERY
	int options = 0;
#endif /* SUPPORT_LINKDOWN_RECOVERY */
#endif /* CONFIG_ARCH_MSM */

	DHD_TRACE(("%s Enter:\n", __FUNCTION__));

	if (bus == NULL) {
		return BCME_ERROR;
	}

	if (bus->dev == NULL) {
		return BCME_ERROR;
	}

#ifdef CONFIG_ARCH_MSM
#ifdef SUPPORT_LINKDOWN_RECOVERY
	/* Always reset the PCIe host when wifi off */
	bus->no_cfg_restore = 1;

	/* NOTE(review): no_cfg_restore was just set above, so this check
	 * is currently always true; kept to mirror the resume path.
	 */
	if (bus->no_cfg_restore) {
		options = MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN;
	}

	ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
		bus->dev, NULL, options);
#else
	ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
		bus->dev, NULL, 0);
#endif /* SUPPORT_LINKDOWN_RECOVERY */
	if (ret) {
		DHD_ERROR(("Failed to stop PCIe link\n"));
		goto done;
	}
done:
#endif /* CONFIG_ARCH_MSM */
	DHD_TRACE(("%s Exit:\n", __FUNCTION__));
	return ret;
}
1653
1654 int
dhdpcie_disable_device(dhd_bus_t * bus)1655 dhdpcie_disable_device(dhd_bus_t *bus)
1656 {
1657 DHD_TRACE(("%s Enter:\n", __FUNCTION__));
1658
1659 if (bus == NULL) {
1660 return BCME_ERROR;
1661 }
1662
1663 if (bus->dev == NULL) {
1664 return BCME_ERROR;
1665 }
1666
1667 pci_disable_device(bus->dev);
1668
1669 return 0;
1670 }
1671
/* Re-enable the PCI device after a power-off/on cycle: restore the
 * config space saved at probe time, then enable the device and set
 * bus mastering.  Returns 0, a pci-core error code, or BCME_ERROR.
 */
int
dhdpcie_enable_device(dhd_bus_t *bus)
{
	int ret = BCME_ERROR;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	dhdpcie_info_t *pch;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */

	DHD_TRACE(("%s Enter:\n", __FUNCTION__));

	if (bus == NULL) {
		return BCME_ERROR;
	}

	if (bus->dev == NULL) {
		return BCME_ERROR;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
	pch = pci_get_drvdata(bus->dev);
	if (pch == NULL) {
		return BCME_ERROR;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && (LINUX_VERSION_CODE < \
	KERNEL_VERSION(3, 19, 0)) && !defined(CONFIG_SOC_EXYNOS8890)
	/* Updated with pci_load_and_free_saved_state to compatible
	 * with Kernel version 3.14.0 to 3.18.41.
	 */
	pci_load_and_free_saved_state(bus->dev, &pch->default_state);
	/* The saved state was consumed above; store a fresh copy so the
	 * next enable cycle can restore it again.
	 */
	pch->default_state = pci_store_saved_state(bus->dev);
#else
	pci_load_saved_state(bus->dev, pch->default_state);
#endif /* LINUX_VERSION >= 3.14.0 && LINUX_VERSION < 3.19.0 && !CONFIG_SOC_EXYNOS8890 */

	pci_restore_state(bus->dev);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) */

	ret = pci_enable_device(bus->dev);
	if (ret) {
		pci_disable_device(bus->dev);
	} else {
		pci_set_master(bus->dev);
	}

	return ret;
}
1719
1720 int
dhdpcie_alloc_resource(dhd_bus_t * bus)1721 dhdpcie_alloc_resource(dhd_bus_t *bus)
1722 {
1723 dhdpcie_info_t *dhdpcie_info;
1724 phys_addr_t bar0_addr, bar1_addr;
1725 ulong bar1_size;
1726
1727 do {
1728 if (bus == NULL) {
1729 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
1730 break;
1731 }
1732
1733 if (bus->dev == NULL) {
1734 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
1735 break;
1736 }
1737
1738 dhdpcie_info = pci_get_drvdata(bus->dev);
1739 if (dhdpcie_info == NULL) {
1740 DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
1741 break;
1742 }
1743
1744 bar0_addr = pci_resource_start(bus->dev, 0); /* Bar-0 mapped address */
1745 bar1_addr = pci_resource_start(bus->dev, 2); /* Bar-1 mapped address */
1746
1747 /* read Bar-1 mapped memory range */
1748 bar1_size = pci_resource_len(bus->dev, 2);
1749
1750 if ((bar1_size == 0) || (bar1_addr == 0)) {
1751 printf("%s: BAR1 Not enabled for this device size(%ld),"
1752 " addr(0x"PRINTF_RESOURCE")\n",
1753 __FUNCTION__, bar1_size, bar1_addr);
1754 break;
1755 }
1756
1757 dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
1758 if (!dhdpcie_info->regs) {
1759 DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__));
1760 break;
1761 }
1762
1763 bus->regs = dhdpcie_info->regs;
1764 dhdpcie_info->tcm_size =
1765 (bar1_size > DONGLE_TCM_MAP_SIZE) ? bar1_size : DONGLE_TCM_MAP_SIZE;
1766 dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, dhdpcie_info->tcm_size);
1767 if (!dhdpcie_info->tcm) {
1768 DHD_ERROR(("%s: ioremap() for regs is failed\n", __FUNCTION__));
1769 REG_UNMAP(dhdpcie_info->regs);
1770 bus->regs = NULL;
1771 break;
1772 }
1773
1774 bus->tcm = dhdpcie_info->tcm;
1775
1776 DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
1777 __FUNCTION__, dhdpcie_info->regs, bar0_addr));
1778 DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
1779 __FUNCTION__, dhdpcie_info->tcm, bar1_addr));
1780
1781 return 0;
1782 } while (0);
1783
1784 return BCME_ERROR;
1785 }
1786
1787 void
dhdpcie_free_resource(dhd_bus_t * bus)1788 dhdpcie_free_resource(dhd_bus_t *bus)
1789 {
1790 dhdpcie_info_t *dhdpcie_info;
1791
1792 if (bus == NULL) {
1793 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
1794 return;
1795 }
1796
1797 if (bus->dev == NULL) {
1798 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
1799 return;
1800 }
1801
1802 dhdpcie_info = pci_get_drvdata(bus->dev);
1803 if (dhdpcie_info == NULL) {
1804 DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
1805 return;
1806 }
1807
1808 if (bus->regs) {
1809 REG_UNMAP(dhdpcie_info->regs);
1810 bus->regs = NULL;
1811 }
1812
1813 if (bus->tcm) {
1814 REG_UNMAP(dhdpcie_info->tcm);
1815 bus->tcm = NULL;
1816 }
1817 }
1818
1819 int
dhdpcie_bus_request_irq(struct dhd_bus * bus)1820 dhdpcie_bus_request_irq(struct dhd_bus *bus)
1821 {
1822 dhdpcie_info_t *dhdpcie_info;
1823 int ret = 0;
1824
1825 if (bus == NULL) {
1826 DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
1827 return BCME_ERROR;
1828 }
1829
1830 if (bus->dev == NULL) {
1831 DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
1832 return BCME_ERROR;
1833 }
1834
1835 dhdpcie_info = pci_get_drvdata(bus->dev);
1836 if (dhdpcie_info == NULL) {
1837 DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
1838 return BCME_ERROR;
1839 }
1840
1841 if (bus->intr) {
1842 /* Register interrupt callback, but mask it (not operational yet). */
1843 DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
1844 dhdpcie_bus_intr_disable(bus);
1845 ret = dhdpcie_request_irq(dhdpcie_info);
1846 if (ret) {
1847 DHD_ERROR(("%s: request_irq() failed, ret=%d\n",
1848 __FUNCTION__, ret));
1849 return ret;
1850 }
1851 }
1852
1853 return ret;
1854 }
1855
1856 #ifdef BCMPCIE_OOB_HOST_WAKE
/* Enable or disable the host-wake OOB GPIO interrupt.  The spinlock
 * serializes the enabled-state so enable_irq()/disable_irq_nosync()
 * calls stay balanced across contexts (including the ISR itself).
 */
void dhdpcie_oob_intr_set(dhd_bus_t *bus, bool enable)
{
	unsigned long flags;
	dhdpcie_info_t *pch;
	dhdpcie_os_info_t *dhdpcie_osinfo;

	if (bus == NULL) {
		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
		return;
	}

	if (bus->dev == NULL) {
		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
		return;
	}

	pch = pci_get_drvdata(bus->dev);
	if (pch == NULL) {
		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
		return;
	}

	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
	spin_lock_irqsave(&dhdpcie_osinfo->oob_irq_spinlock, flags);
	/* Act only on an actual state change, and only when an OOB IRQ
	 * was configured (oob_irq_num > 0).
	 */
	if ((dhdpcie_osinfo->oob_irq_enabled != enable) &&
		(dhdpcie_osinfo->oob_irq_num > 0)) {
		if (enable) {
			enable_irq(dhdpcie_osinfo->oob_irq_num);
		} else {
			/* nosync: may be called from the IRQ handler itself */
			disable_irq_nosync(dhdpcie_osinfo->oob_irq_num);
		}
		dhdpcie_osinfo->oob_irq_enabled = enable;
	}
	spin_unlock_irqrestore(&dhdpcie_osinfo->oob_irq_spinlock, flags);
}
1892
/* Host-wake OOB GPIO interrupt handler: masks further OOB interrupts,
 * accounts the wake event (DHD_WAKE_STATUS), kicks the runtime-PM wake
 * path, and holds a timed wake lock while the stack resumes.
 */
static irqreturn_t wlan_oob_irq(int irq, void *data)
{
	dhd_bus_t *bus;
	DHD_TRACE(("%s: IRQ Triggered\n", __FUNCTION__));
	bus = (dhd_bus_t *)data;
	/* Mask OOB interrupts until the host has finished waking up. */
	dhdpcie_oob_intr_set(bus, FALSE);
#ifdef DHD_WAKE_STATUS
#ifdef DHD_PCIE_RUNTIMEPM
	/* This condition is for avoiding counting of wake up from Runtime PM */
	if (bus->chk_pm)
#endif /* DHD_PCIE_RUNTIMEPM */
	{
		bcmpcie_set_get_wake(bus, 1);
	}
#endif /* DHD_WAKE_STATUS */
#ifdef DHD_PCIE_RUNTIMEPM
	dhdpcie_runtime_bus_wake(bus->dhd, FALSE, wlan_oob_irq);
#endif /* DHD_PCIE_RUNTIMEPM */
	if (bus->dhd->up && bus->oob_presuspend) {
		DHD_OS_OOB_IRQ_WAKE_LOCK_TIMEOUT(bus->dhd, OOB_WAKE_LOCK_TIMEOUT);
	}
	return IRQ_HANDLED;
}
1916
/* Register the host-wake OOB GPIO interrupt and (unless DISABLE_WOWLAN)
 * arm it as a system wakeup source.  Returns 0 on success, -EINVAL on
 * bad arguments, -EBUSY if already registered, or the request_irq error.
 */
int dhdpcie_oob_intr_register(dhd_bus_t *bus)
{
	int err = 0;
	dhdpcie_info_t *pch;
	dhdpcie_os_info_t *dhdpcie_osinfo;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
	if (bus == NULL) {
		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
		return -EINVAL;
	}

	if (bus->dev == NULL) {
		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
		return -EINVAL;
	}

	pch = pci_get_drvdata(bus->dev);
	if (pch == NULL) {
		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
		return -EINVAL;
	}

	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
	if (dhdpcie_osinfo->oob_irq_registered) {
		DHD_ERROR(("%s: irq is already registered\n", __FUNCTION__));
		return -EBUSY;
	}

	if (dhdpcie_osinfo->oob_irq_num > 0) {
		printf("%s OOB irq=%d flags=0x%X\n", __FUNCTION__,
			(int)dhdpcie_osinfo->oob_irq_num,
			(int)dhdpcie_osinfo->oob_irq_flags);
		/* oob_irq_flags carry the platform's trigger type. */
		err = request_irq(dhdpcie_osinfo->oob_irq_num, wlan_oob_irq,
			dhdpcie_osinfo->oob_irq_flags, "dhdpcie_host_wake",
			bus);
		if (err) {
			DHD_ERROR(("%s: request_irq failed with %d\n",
				__FUNCTION__, err));
			return err;
		}
#if defined(DISABLE_WOWLAN)
		printf("%s: disable_irq_wake\n", __FUNCTION__);
		dhdpcie_osinfo->oob_irq_wake_enabled = FALSE;
#else
		printf("%s: enable_irq_wake\n", __FUNCTION__);
		/* enable_irq_wake failure is non-fatal: the IRQ still
		 * works, it just cannot wake the system from suspend.
		 */
		err = enable_irq_wake(dhdpcie_osinfo->oob_irq_num);
		if (!err) {
			dhdpcie_osinfo->oob_irq_wake_enabled = TRUE;
		} else
			printf("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err);
#endif
		dhdpcie_osinfo->oob_irq_enabled = TRUE;
	}

	/* Marked registered even without a valid IRQ number so teardown
	 * in dhdpcie_oob_intr_unregister() stays symmetric.
	 */
	dhdpcie_osinfo->oob_irq_registered = TRUE;

	return 0;
}
1976
/* Undo dhdpcie_oob_intr_register(): disarm the wakeup source, disable
 * the IRQ if currently enabled, and release the line.
 */
void dhdpcie_oob_intr_unregister(dhd_bus_t *bus)
{
	int err = 0;
	dhdpcie_info_t *pch;
	dhdpcie_os_info_t *dhdpcie_osinfo;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
	if (bus == NULL) {
		DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
		return;
	}

	if (bus->dev == NULL) {
		DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
		return;
	}

	pch = pci_get_drvdata(bus->dev);
	if (pch == NULL) {
		DHD_ERROR(("%s: pch is NULL\n", __FUNCTION__));
		return;
	}

	dhdpcie_osinfo = (dhdpcie_os_info_t *)pch->os_cxt;
	if (!dhdpcie_osinfo->oob_irq_registered) {
		DHD_ERROR(("%s: irq is not registered\n", __FUNCTION__));
		return;
	}
	if (dhdpcie_osinfo->oob_irq_num > 0) {
		/* Disarm the system-wakeup capability first. */
		if (dhdpcie_osinfo->oob_irq_wake_enabled) {
			err = disable_irq_wake(dhdpcie_osinfo->oob_irq_num);
			if (!err) {
				dhdpcie_osinfo->oob_irq_wake_enabled = FALSE;
			}
		}
		/* Keep the enable/disable depth balanced before free_irq. */
		if (dhdpcie_osinfo->oob_irq_enabled) {
			disable_irq(dhdpcie_osinfo->oob_irq_num);
			dhdpcie_osinfo->oob_irq_enabled = FALSE;
		}
		free_irq(dhdpcie_osinfo->oob_irq_num, bus);
	}
	dhdpcie_osinfo->oob_irq_registered = FALSE;
}
2020 #endif /* BCMPCIE_OOB_HOST_WAKE */
2021
2022 #ifdef PCIE_OOB
/* Initialize the OOB GPIO lines through the FTDI bit-bang interface:
 * power on the WL and BT cores, assert DEVICE_WAKE, then drive
 * Device_Wake low via dhd_bus_set_device_wake().
 */
void dhdpcie_oob_init(dhd_bus_t *bus)
{
	gpio_handle_val = get_handle(OOB_PORT);
	if (gpio_handle_val < 0)
	{
		DHD_ERROR(("%s: Could not get GPIO handle.\n", __FUNCTION__));
		ASSERT(FALSE);
	}

	/* All pins as inputs first, then set outputs after the levels
	 * below are latched (avoids glitching the REG_ON lines).
	 */
	gpio_direction = 0;
	ftdi_set_bitmode(gpio_handle_val, 0, BITMODE_BITBANG);

	/* Note BT core is also enabled here */
	gpio_port = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE;
	gpio_write_port(gpio_handle_val, gpio_port);

	gpio_direction = 1 << BIT_WL_REG_ON | 1 << BIT_BT_REG_ON | 1 << DEVICE_WAKE;
	ftdi_set_bitmode(gpio_handle_val, gpio_direction, BITMODE_BITBANG);

	bus->oob_enabled = TRUE;
	bus->oob_presuspend = FALSE;

	/* drive the Device_Wake GPIO low on startup */
	/* Seeding state TRUE forces the following FALSE call to actually
	 * write the GPIO (the setter acts only on state changes).
	 */
	bus->device_wake_state = TRUE;
	dhd_bus_set_device_wake(bus, FALSE);
	dhd_bus_doorbell_timeout_reset(bus);

}
2051
2052 void
dhd_oob_set_bt_reg_on(struct dhd_bus * bus,bool val)2053 dhd_oob_set_bt_reg_on(struct dhd_bus *bus, bool val)
2054 {
2055 DHD_INFO(("Set Device_Wake to %d\n", val));
2056 if (val)
2057 {
2058 gpio_port = gpio_port | (1 << BIT_BT_REG_ON);
2059 gpio_write_port(gpio_handle_val, gpio_port);
2060 } else {
2061 gpio_port = gpio_port & (0xff ^ (1 << BIT_BT_REG_ON));
2062 gpio_write_port(gpio_handle_val, gpio_port);
2063 }
2064 }
2065
2066 int
dhd_oob_get_bt_reg_on(struct dhd_bus * bus)2067 dhd_oob_get_bt_reg_on(struct dhd_bus *bus)
2068 {
2069 int ret;
2070 uint8 val;
2071 ret = gpio_read_port(gpio_handle_val, &val);
2072
2073 if (ret < 0) {
2074 DHD_ERROR(("gpio_read_port returns %d\n", ret));
2075 return ret;
2076 }
2077
2078 if (val & (1 << BIT_BT_REG_ON))
2079 {
2080 ret = 1;
2081 } else {
2082 ret = 0;
2083 }
2084
2085 return ret;
2086 }
2087
2088 int
dhd_os_oob_set_device_wake(struct dhd_bus * bus,bool val)2089 dhd_os_oob_set_device_wake(struct dhd_bus *bus, bool val)
2090 {
2091 if (bus->device_wake_state != val)
2092 {
2093 DHD_INFO(("Set Device_Wake to %d\n", val));
2094
2095 if (bus->oob_enabled && !bus->oob_presuspend)
2096 {
2097 if (val)
2098 {
2099 gpio_port = gpio_port | (1 << DEVICE_WAKE);
2100 gpio_write_port_non_block(gpio_handle_val, gpio_port);
2101 } else {
2102 gpio_port = gpio_port & (0xff ^ (1 << DEVICE_WAKE));
2103 gpio_write_port_non_block(gpio_handle_val, gpio_port);
2104 }
2105 }
2106
2107 bus->device_wake_state = val;
2108 }
2109 return BCME_OK;
2110 }
2111
/* Stub: in-band DEVICE_WAKE signalling; intentionally a no-op. */
INLINE void
dhd_os_ib_set_device_wake(struct dhd_bus *bus, bool val)
{
	/* TODO: Currently Inband implementation of Device_Wake is not supported,
	 * so this function is left empty later this can be used to support the same.
	 */
}
2119 #endif /* PCIE_OOB */
2120
2121 #ifdef DHD_PCIE_RUNTIMEPM
/*
 * dhd_runtimepm_state - periodic runtime-PM idle tick.
 *
 * Called at the dhd_runtimepm_ms cadence. Bumps the bus idle counter and,
 * once bus->idletime ticks have elapsed with the bus idle, runs a full
 * runtime suspend -> wait-for-wake -> resume cycle.
 *
 * Returns TRUE when a complete suspend/resume cycle finished, FALSE when
 * the idle threshold was not reached, the bus was busy, or the suspend
 * attempt failed (caller re-tries on the next tick).
 *
 * Locking: dhd general lock is dropped across the suspend/resume calls and
 * the sleep on rpm_queue; the busy-state bits serialize against other
 * contexts in that window.
 */
bool dhd_runtimepm_state(dhd_pub_t *dhd)
{
	dhd_bus_t *bus;
	unsigned long flags;
	bus = dhd->bus;

	DHD_GENERAL_LOCK(dhd, flags);

	bus->idlecount++;

	DHD_TRACE(("%s : Enter \n", __FUNCTION__));
	/* idletime <= 0 disables runtime PM entirely (see dhdpcie_block_runtime_pm). */
	if ((bus->idletime > 0) && (bus->idlecount >= bus->idletime)) {
		bus->idlecount = 0;
		if (DHD_BUS_BUSY_CHECK_IDLE(dhd) && !DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd)) {
			/* Arm the wake flag and advertise suspend-in-progress before
			 * dropping the lock, so dhd_runtime_bus_wake() can see us.
			 */
			bus->bus_wake = 0;
			DHD_BUS_BUSY_SET_RPM_SUSPEND_IN_PROGRESS(dhd);
			bus->runtime_resume_done = FALSE;
			/* stop all interface network queue. */
			dhd_bus_stop_queue(bus);
			DHD_GENERAL_UNLOCK(dhd, flags);
			DHD_ERROR(("%s: DHD Idle state!! -  idletime :%d, wdtick :%d \n",
					__FUNCTION__, bus->idletime, dhd_runtimepm_ms));
			/* RPM suspend is failed, return FALSE then re-trying */
			if (dhdpcie_set_suspend_resume(bus, TRUE)) {
				DHD_ERROR(("%s: exit with wakelock \n", __FUNCTION__));
				/* Roll back: clear in-progress, restart traffic and
				 * release anyone blocked on runtime_resume_done.
				 */
				DHD_GENERAL_LOCK(dhd, flags);
				DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd);
				dhd_os_busbusy_wake(bus->dhd);
				bus->runtime_resume_done = TRUE;
				/* It can make stuck NET TX Queue without below */
				dhd_bus_start_queue(bus);
				DHD_GENERAL_UNLOCK(dhd, flags);
				smp_wmb();
				wake_up_interruptible(&bus->rpm_queue);
				return FALSE;
			}

			DHD_GENERAL_LOCK(dhd, flags);
			DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_IN_PROGRESS(dhd);
			DHD_BUS_BUSY_SET_RPM_SUSPEND_DONE(dhd);
			/* For making sure NET TX Queue active  */
			dhd_bus_start_queue(bus);
			DHD_GENERAL_UNLOCK(dhd, flags);

			/* Sleep here until dhd_runtime_bus_wake() sets bus_wake. */
			wait_event_interruptible(bus->rpm_queue, bus->bus_wake);

			DHD_GENERAL_LOCK(dhd, flags);
			DHD_BUS_BUSY_CLEAR_RPM_SUSPEND_DONE(dhd);
			DHD_BUS_BUSY_SET_RPM_RESUME_IN_PROGRESS(dhd);
			DHD_GENERAL_UNLOCK(dhd, flags);

			dhdpcie_set_suspend_resume(bus, FALSE);

			DHD_GENERAL_LOCK(dhd, flags);
			DHD_BUS_BUSY_CLEAR_RPM_RESUME_IN_PROGRESS(dhd);
			dhd_os_busbusy_wake(bus->dhd);
			/* Inform the wake up context that Resume is over */
			bus->runtime_resume_done = TRUE;
			/* For making sure NET TX Queue active  */
			dhd_bus_start_queue(bus);
			DHD_GENERAL_UNLOCK(dhd, flags);

			smp_wmb();
			wake_up_interruptible(&bus->rpm_queue);
			DHD_ERROR(("%s : runtime resume ended \n", __FUNCTION__));
			return TRUE;
		} else {
			DHD_GENERAL_UNLOCK(dhd, flags);
			/* Since one of the contexts are busy (TX, IOVAR or RX)
			 * we should not suspend
			 */
			DHD_ERROR(("%s : bus is active with dhd_bus_busy_state = 0x%x\n",
				__FUNCTION__, dhd->dhd_bus_busy_state));
			return FALSE;
		}
	}

	DHD_GENERAL_UNLOCK(dhd, flags);
	return FALSE;
} /* dhd_runtimepm_state */
2202
2203 /*
2204 * dhd_runtime_bus_wake
2205 * TRUE - related with runtime pm context
2206 * FALSE - It isn't invloved in runtime pm context
2207 */
/*
 * Kick the runtime-PM state machine awake on behalf of the caller at
 * func_addr. When the bus is suspending or suspended, signals the RPM
 * thread via rpm_queue; when wait is TRUE, additionally blocks until the
 * resume has fully completed (runtime_resume_done).
 *
 * Returns TRUE if the call was made in a runtime-PM context, FALSE when
 * dhd is not up or no RPM state is active.
 */
bool dhd_runtime_bus_wake(dhd_bus_t *bus, bool wait, void *func_addr)
{
	unsigned long flags;

	bus->idlecount = 0;
	DHD_TRACE(("%s : enter\n", __FUNCTION__));

	if (bus->dhd->up == FALSE) {
		DHD_INFO(("%s : dhd is not up\n", __FUNCTION__));
		return FALSE;
	}

	DHD_GENERAL_LOCK(bus->dhd, flags);
	if (!DHD_BUS_BUSY_CHECK_RPM_ALL(bus->dhd)) {
		/* Not suspending, suspended or resuming: nothing to wake. */
		DHD_GENERAL_UNLOCK(bus->dhd, flags);
		return FALSE;
	}

	if (DHD_BUS_BUSY_CHECK_RPM_SUSPEND_IN_PROGRESS(bus->dhd) ||
		DHD_BUS_BUSY_CHECK_RPM_SUSPEND_DONE(bus->dhd)) {
		/* Wake up the RPM state thread: it is suspending or suspended. */
		bus->bus_wake = 1;

		DHD_GENERAL_UNLOCK(bus->dhd, flags);

		DHD_ERROR(("Runtime Resume is called in %pf\n", func_addr));
		smp_wmb();
		wake_up_interruptible(&bus->rpm_queue);
		/* No need to wake up the RPM state thread */
	} else if (DHD_BUS_BUSY_CHECK_RPM_RESUME_IN_PROGRESS(bus->dhd)) {
		DHD_GENERAL_UNLOCK(bus->dhd, flags);
	}

	/* If wait is TRUE, function with wait = TRUE will be wait in here */
	if (wait) {
		wait_event_interruptible(bus->rpm_queue, bus->runtime_resume_done);
	} else {
		DHD_INFO(("%s: bus wakeup but no wait until resume done\n", __FUNCTION__));
	}

	/* If it is called from RPM context, it returns TRUE */
	return TRUE;
}
2249
/* dhd_pub-level convenience wrapper around dhd_runtime_bus_wake(). */
bool dhdpcie_runtime_bus_wake(dhd_pub_t *dhdp, bool wait, void *func_addr)
{
	return dhd_runtime_bus_wake(dhdp->bus, wait, func_addr);
}
2255
/*
 * Disable runtime PM for this bus: an idletime of 0 makes
 * dhd_runtimepm_state() skip the suspend path permanently.
 */
void dhdpcie_block_runtime_pm(dhd_pub_t *dhdp)
{
	dhdp->bus->idletime = 0;
}
2261
dhdpcie_is_resume_done(dhd_pub_t * dhdp)2262 bool dhdpcie_is_resume_done(dhd_pub_t *dhdp)
2263 {
2264 dhd_bus_t *bus = dhdp->bus;
2265 return bus->runtime_resume_done;
2266 }
2267 #endif /* DHD_PCIE_RUNTIMEPM */
2268
/*
 * Map a dhd_bus to its generic struct device, or NULL when no PCI device
 * is attached to the bus.
 */
struct device * dhd_bus_to_dev(dhd_bus_t *bus)
{
	struct pci_dev *pcidev = bus->dev;

	return pcidev ? &pcidev->dev : NULL;
}
2279
2280 #ifdef HOFFLOAD_MODULES
2281 void
dhd_free_module_memory(struct dhd_bus * bus,struct module_metadata * hmem)2282 dhd_free_module_memory(struct dhd_bus *bus, struct module_metadata *hmem)
2283 {
2284 struct device *dev = &bus->dev->dev;
2285 if (hmem) {
2286 dma_unmap_single(dev, (dma_addr_t) hmem->data_addr, hmem->size, DMA_TO_DEVICE);
2287 kfree(hmem->data);
2288 hmem->data = NULL;
2289 hmem->size = 0;
2290 } else {
2291 DHD_ERROR(("dev:%p pci unmapping error\n", dev));
2292 }
2293 }
2294
2295 void *
dhd_alloc_module_memory(struct dhd_bus * bus,uint32_t size,struct module_metadata * hmem)2296 dhd_alloc_module_memory(struct dhd_bus *bus, uint32_t size, struct module_metadata *hmem)
2297 {
2298 struct device *dev = &bus->dev->dev;
2299 if (!hmem->data) {
2300 hmem->data = kzalloc(size, GFP_KERNEL);
2301 if (!hmem->data) {
2302 DHD_ERROR(("dev:%p mem alloc failure\n", dev));
2303 return NULL;
2304 }
2305 }
2306 hmem->size = size;
2307 DHD_INFO(("module size: 0x%x \n", hmem->size));
2308 hmem->data_addr = (u64) dma_map_single(dev, hmem->data, hmem->size, DMA_TO_DEVICE);
2309 if (dma_mapping_error(dev, hmem->data_addr)) {
2310 DHD_ERROR(("dev:%p dma mapping error\n", dev));
2311 kfree(hmem->data);
2312 hmem->data = NULL;
2313 return hmem->data;
2314 }
2315 return hmem->data;
2316 }
2317 #endif /* HOFFLOAD_MODULES */
2318