1 // SPDX-License-Identifier: ISC
2 /*
3 * Copyright (c) 2014 Broadcom Corporation
4 */
5
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/firmware.h>
9 #include <linux/pci.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 #include <linux/interrupt.h>
13 #include <linux/bcma/bcma.h>
14 #include <linux/sched.h>
15 #include <linux/io.h>
16 #include <asm/unaligned.h>
17
18 #include <soc.h>
19 #include <chipcommon.h>
20 #include <brcmu_utils.h>
21 #include <brcmu_wifi.h>
22 #include <brcm_hw_ids.h>
23
24 /* Custom brcmf_err() that takes bus arg and passes it further */
25 #define brcmf_err(bus, fmt, ...) \
26 do { \
27 if (IS_ENABLED(CONFIG_BRCMDBG) || \
28 IS_ENABLED(CONFIG_BRCM_TRACING) || \
29 net_ratelimit()) \
30 __brcmf_err(bus, __func__, fmt, ##__VA_ARGS__); \
31 } while (0)
32
33 #include "debug.h"
34 #include "bus.h"
35 #include "commonring.h"
36 #include "msgbuf.h"
37 #include "pcie.h"
38 #include "firmware.h"
39 #include "chip.h"
40 #include "core.h"
41 #include "common.h"
42
43
44 enum brcmf_pcie_state {
45 BRCMFMAC_PCIE_STATE_DOWN,
46 BRCMFMAC_PCIE_STATE_UP
47 };
48
49 BRCMF_FW_DEF(43602, "brcmfmac43602-pcie");
50 BRCMF_FW_DEF(4350, "brcmfmac4350-pcie");
51 BRCMF_FW_DEF(4350C, "brcmfmac4350c2-pcie");
52 BRCMF_FW_DEF(4356, "brcmfmac4356-pcie");
53 BRCMF_FW_DEF(43570, "brcmfmac43570-pcie");
54 BRCMF_FW_DEF(4358, "brcmfmac4358-pcie");
55 BRCMF_FW_DEF(4359, "brcmfmac4359-pcie");
56 BRCMF_FW_DEF(4364, "brcmfmac4364-pcie");
57 BRCMF_FW_DEF(4365B, "brcmfmac4365b-pcie");
58 BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie");
59 BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
60 BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
61 BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
62
63 static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
64 BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
65 BRCMF_FW_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C),
66 BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C),
67 BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350),
68 BRCMF_FW_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C),
69 BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
70 BRCMF_FW_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570),
71 BRCMF_FW_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570),
72 BRCMF_FW_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
73 BRCMF_FW_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
74 BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
75 BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0xFFFFFFFF, 4364),
76 BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
77 BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
78 BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
79 BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C),
80 BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C),
81 BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
82 };
83
84 #define BRCMF_PCIE_FW_UP_TIMEOUT 5000 /* msec */
85
86 #define BRCMF_PCIE_REG_MAP_SIZE (32 * 1024)
87
88 /* backplane address space accessed by BAR0 */
89 #define BRCMF_PCIE_BAR0_WINDOW 0x80
90 #define BRCMF_PCIE_BAR0_REG_SIZE 0x1000
91 #define BRCMF_PCIE_BAR0_WRAPPERBASE 0x70
92
93 #define BRCMF_PCIE_BAR0_WRAPBASE_DMP_OFFSET 0x1000
94 #define BRCMF_PCIE_BARO_PCIE_ENUM_OFFSET 0x2000
95
96 #define BRCMF_PCIE_ARMCR4REG_BANKIDX 0x40
97 #define BRCMF_PCIE_ARMCR4REG_BANKPDA 0x4C
98
99 #define BRCMF_PCIE_REG_INTSTATUS 0x90
100 #define BRCMF_PCIE_REG_INTMASK 0x94
101 #define BRCMF_PCIE_REG_SBMBX 0x98
102
103 #define BRCMF_PCIE_REG_LINK_STATUS_CTRL 0xBC
104
105 #define BRCMF_PCIE_PCIE2REG_INTMASK 0x24
106 #define BRCMF_PCIE_PCIE2REG_MAILBOXINT 0x48
107 #define BRCMF_PCIE_PCIE2REG_MAILBOXMASK 0x4C
108 #define BRCMF_PCIE_PCIE2REG_CONFIGADDR 0x120
109 #define BRCMF_PCIE_PCIE2REG_CONFIGDATA 0x124
110 #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0 0x140
111 #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1 0x144
112
113 #define BRCMF_PCIE2_INTA 0x01
114 #define BRCMF_PCIE2_INTB 0x02
115
116 #define BRCMF_PCIE_INT_0 0x01
117 #define BRCMF_PCIE_INT_1 0x02
118 #define BRCMF_PCIE_INT_DEF (BRCMF_PCIE_INT_0 | \
119 BRCMF_PCIE_INT_1)
120
121 #define BRCMF_PCIE_MB_INT_FN0_0 0x0100
122 #define BRCMF_PCIE_MB_INT_FN0_1 0x0200
123 #define BRCMF_PCIE_MB_INT_D2H0_DB0 0x10000
124 #define BRCMF_PCIE_MB_INT_D2H0_DB1 0x20000
125 #define BRCMF_PCIE_MB_INT_D2H1_DB0 0x40000
126 #define BRCMF_PCIE_MB_INT_D2H1_DB1 0x80000
127 #define BRCMF_PCIE_MB_INT_D2H2_DB0 0x100000
128 #define BRCMF_PCIE_MB_INT_D2H2_DB1 0x200000
129 #define BRCMF_PCIE_MB_INT_D2H3_DB0 0x400000
130 #define BRCMF_PCIE_MB_INT_D2H3_DB1 0x800000
131
132 #define BRCMF_PCIE_MB_INT_D2H_DB (BRCMF_PCIE_MB_INT_D2H0_DB0 | \
133 BRCMF_PCIE_MB_INT_D2H0_DB1 | \
134 BRCMF_PCIE_MB_INT_D2H1_DB0 | \
135 BRCMF_PCIE_MB_INT_D2H1_DB1 | \
136 BRCMF_PCIE_MB_INT_D2H2_DB0 | \
137 BRCMF_PCIE_MB_INT_D2H2_DB1 | \
138 BRCMF_PCIE_MB_INT_D2H3_DB0 | \
139 BRCMF_PCIE_MB_INT_D2H3_DB1)
140
141 #define BRCMF_PCIE_SHARED_VERSION_7 7
142 #define BRCMF_PCIE_MIN_SHARED_VERSION 5
143 #define BRCMF_PCIE_MAX_SHARED_VERSION BRCMF_PCIE_SHARED_VERSION_7
144 #define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
145 #define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000
146 #define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000
147 #define BRCMF_PCIE_SHARED_HOSTRDY_DB1 0x10000000
148
149 #define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000
150 #define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000
151
152 #define BRCMF_SHARED_MAX_RXBUFPOST_OFFSET 34
153 #define BRCMF_SHARED_RING_BASE_OFFSET 52
154 #define BRCMF_SHARED_RX_DATAOFFSET_OFFSET 36
155 #define BRCMF_SHARED_CONSOLE_ADDR_OFFSET 20
156 #define BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET 40
157 #define BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET 44
158 #define BRCMF_SHARED_RING_INFO_ADDR_OFFSET 48
159 #define BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET 52
160 #define BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET 56
161 #define BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET 64
162 #define BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET 68
163
164 #define BRCMF_RING_H2D_RING_COUNT_OFFSET 0
165 #define BRCMF_RING_D2H_RING_COUNT_OFFSET 1
166 #define BRCMF_RING_H2D_RING_MEM_OFFSET 4
167 #define BRCMF_RING_H2D_RING_STATE_OFFSET 8
168
169 #define BRCMF_RING_MEM_BASE_ADDR_OFFSET 8
170 #define BRCMF_RING_MAX_ITEM_OFFSET 4
171 #define BRCMF_RING_LEN_ITEMS_OFFSET 6
172 #define BRCMF_RING_MEM_SZ 16
173 #define BRCMF_RING_STATE_SZ 8
174
175 #define BRCMF_DEF_MAX_RXBUFPOST 255
176
177 #define BRCMF_CONSOLE_BUFADDR_OFFSET 8
178 #define BRCMF_CONSOLE_BUFSIZE_OFFSET 12
179 #define BRCMF_CONSOLE_WRITEIDX_OFFSET 16
180
181 #define BRCMF_DMA_D2H_SCRATCH_BUF_LEN 8
182 #define BRCMF_DMA_D2H_RINGUPD_BUF_LEN 1024
183
184 #define BRCMF_D2H_DEV_D3_ACK 0x00000001
185 #define BRCMF_D2H_DEV_DS_ENTER_REQ 0x00000002
186 #define BRCMF_D2H_DEV_DS_EXIT_NOTE 0x00000004
187 #define BRCMF_D2H_DEV_FWHALT 0x10000000
188
189 #define BRCMF_H2D_HOST_D3_INFORM 0x00000001
190 #define BRCMF_H2D_HOST_DS_ACK 0x00000002
191 #define BRCMF_H2D_HOST_D0_INFORM_IN_USE 0x00000008
192 #define BRCMF_H2D_HOST_D0_INFORM 0x00000010
193
194 #define BRCMF_PCIE_MBDATA_TIMEOUT msecs_to_jiffies(2000)
195
196 #define BRCMF_PCIE_CFGREG_STATUS_CMD 0x4
197 #define BRCMF_PCIE_CFGREG_PM_CSR 0x4C
198 #define BRCMF_PCIE_CFGREG_MSI_CAP 0x58
199 #define BRCMF_PCIE_CFGREG_MSI_ADDR_L 0x5C
200 #define BRCMF_PCIE_CFGREG_MSI_ADDR_H 0x60
201 #define BRCMF_PCIE_CFGREG_MSI_DATA 0x64
202 #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL 0xBC
203 #define BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2 0xDC
204 #define BRCMF_PCIE_CFGREG_RBAR_CTRL 0x228
205 #define BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1 0x248
206 #define BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG 0x4E0
207 #define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG 0x4F4
208 #define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB 3
209
210 /* Magic number at a magic location to find RAM size */
211 #define BRCMF_RAMSIZE_MAGIC 0x534d4152 /* SMAR */
212 #define BRCMF_RAMSIZE_OFFSET 0x6c
213
214
215 struct brcmf_pcie_console {
216 u32 base_addr;
217 u32 buf_addr;
218 u32 bufsize;
219 u32 read_idx;
220 u8 log_str[256];
221 u8 log_idx;
222 };
223
224 struct brcmf_pcie_shared_info {
225 u32 tcm_base_address;
226 u32 flags;
227 struct brcmf_pcie_ringbuf *commonrings[BRCMF_NROF_COMMON_MSGRINGS];
228 struct brcmf_pcie_ringbuf *flowrings;
229 u16 max_rxbufpost;
230 u16 max_flowrings;
231 u16 max_submissionrings;
232 u16 max_completionrings;
233 u32 rx_dataoffset;
234 u32 htod_mb_data_addr;
235 u32 dtoh_mb_data_addr;
236 u32 ring_info_addr;
237 struct brcmf_pcie_console console;
238 void *scratch;
239 dma_addr_t scratch_dmahandle;
240 void *ringupd;
241 dma_addr_t ringupd_dmahandle;
242 u8 version;
243 };
244
245 struct brcmf_pcie_core_info {
246 u32 base;
247 u32 wrapbase;
248 };
249
250 struct brcmf_pciedev_info {
251 enum brcmf_pcie_state state;
252 bool in_irq;
253 struct pci_dev *pdev;
254 char fw_name[BRCMF_FW_NAME_LEN];
255 char nvram_name[BRCMF_FW_NAME_LEN];
256 void __iomem *regs;
257 void __iomem *tcm;
258 u32 ram_base;
259 u32 ram_size;
260 struct brcmf_chip *ci;
261 u32 coreid;
262 struct brcmf_pcie_shared_info shared;
263 wait_queue_head_t mbdata_resp_wait;
264 bool mbdata_completed;
265 bool irq_allocated;
266 bool wowl_enabled;
267 u8 dma_idx_sz;
268 void *idxbuf;
269 u32 idxbuf_sz;
270 dma_addr_t idxbuf_dmahandle;
271 u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
272 void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
273 u16 value);
274 struct brcmf_mp_device *settings;
275 };
276
277 struct brcmf_pcie_ringbuf {
278 struct brcmf_commonring commonring;
279 dma_addr_t dma_handle;
280 u32 w_idx_addr;
281 u32 r_idx_addr;
282 struct brcmf_pciedev_info *devinfo;
283 u8 id;
284 };
285
286 /**
287 * struct brcmf_pcie_dhi_ringinfo - dongle/host interface shared ring info
288 *
289 * @ringmem: dongle memory pointer to ring memory location
290 * @h2d_w_idx_ptr: h2d ring write indices dongle memory pointers
291 * @h2d_r_idx_ptr: h2d ring read indices dongle memory pointers
292 * @d2h_w_idx_ptr: d2h ring write indices dongle memory pointers
293 * @d2h_r_idx_ptr: d2h ring read indices dongle memory pointers
294 * @h2d_w_idx_hostaddr: h2d ring write indices host memory pointers
295 * @h2d_r_idx_hostaddr: h2d ring read indices host memory pointers
296 * @d2h_w_idx_hostaddr: d2h ring write indices host memory pointers
297 * @d2h_r_idx_hostaddr: d2h ring read indices host memory pointers
298 * @max_flowrings: maximum number of tx flow rings supported.
299 * @max_submissionrings: maximum number of submission rings(h2d) supported.
300 * @max_completionrings: maximum number of completion rings(d2h) supported.
301 */
302 struct brcmf_pcie_dhi_ringinfo {
303 __le32 ringmem;
304 __le32 h2d_w_idx_ptr;
305 __le32 h2d_r_idx_ptr;
306 __le32 d2h_w_idx_ptr;
307 __le32 d2h_r_idx_ptr;
308 struct msgbuf_buf_addr h2d_w_idx_hostaddr;
309 struct msgbuf_buf_addr h2d_r_idx_hostaddr;
310 struct msgbuf_buf_addr d2h_w_idx_hostaddr;
311 struct msgbuf_buf_addr d2h_r_idx_hostaddr;
312 __le16 max_flowrings;
313 __le16 max_submissionrings;
314 __le16 max_completionrings;
315 };
316
317 static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = {
318 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM,
319 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM,
320 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM,
321 BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM,
322 BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM
323 };
324
325 static const u32 brcmf_ring_itemsize_pre_v7[BRCMF_NROF_COMMON_MSGRINGS] = {
326 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
327 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
328 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
329 BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE_PRE_V7,
330 BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE_PRE_V7
331 };
332
333 static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
334 BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE,
335 BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE,
336 BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE,
337 BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE,
338 BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
339 };
340
341 static void brcmf_pcie_setup(struct device *dev, int ret,
342 struct brcmf_fw_request *fwreq);
343 static struct brcmf_fw_request *
344 brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo);
345
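/* Low-level accessors: devinfo->regs maps the PCIe core register space
 * (BAR0) and devinfo->tcm maps the device internal memory (TCM, BAR1).
 */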
346 static u32
347 brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
348 {
349 void __iomem *address = devinfo->regs + reg_offset;
350
351 return (ioread32(address));
352 }
353
354
355 static void
356 brcmf_pcie_write_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset,
357 u32 value)
358 {
359 void __iomem *address = devinfo->regs + reg_offset;
360
361 iowrite32(value, address);
362 }
363
364
365 static u8
366 brcmf_pcie_read_tcm8(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
367 {
368 void __iomem *address = devinfo->tcm + mem_offset;
369
370 return (ioread8(address));
371 }
372
373
374 static u16
375 brcmf_pcie_read_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
376 {
377 void __iomem *address = devinfo->tcm + mem_offset;
378
379 return (ioread16(address));
380 }
381
382
383 static void
384 brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
385 u16 value)
386 {
387 void __iomem *address = devinfo->tcm + mem_offset;
388
389 iowrite16(value, address);
390 }
391
392
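/* Ring index accessors used when the firmware supports DMA'ed indices:
 * the 16-bit read/write pointers live in the host coherent buffer
 * devinfo->idxbuf instead of device TCM.
 */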
393 static u16
394 brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
395 {
396 u16 *address = devinfo->idxbuf + mem_offset;
397
398 return (*(address));
399 }
400
401
402 static void
403 brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
404 u16 value)
405 {
406 u16 *address = devinfo->idxbuf + mem_offset;
407
408 *(address) = value;
409 }
410
411
412 static u32
413 brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
414 {
415 void __iomem *address = devinfo->tcm + mem_offset;
416
417 return (ioread32(address));
418 }
419
420
421 static void
422 brcmf_pcie_write_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
423 u32 value)
424 {
425 void __iomem *address = devinfo->tcm + mem_offset;
426
427 iowrite32(value, address);
428 }
429
430
431 static u32
432 brcmf_pcie_read_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
433 {
434 void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
435
436 return (ioread32(addr));
437 }
438
439
440 static void
441 brcmf_pcie_write_ram32(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
442 u32 value)
443 {
444 void __iomem *addr = devinfo->tcm + devinfo->ci->rambase + mem_offset;
445
446 iowrite32(value, addr);
447 }
448
449
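/* Copy a block from device TCM to host memory, using 32-, 16- or 8-bit
 * accesses depending on the alignment of source, destination and length.
 */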
450 static void
451 brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
452 void *dstaddr, u32 len)
453 {
454 void __iomem *address = devinfo->tcm + mem_offset;
455 __le32 *dst32;
456 __le16 *dst16;
457 u8 *dst8;
458
459 if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) {
460 if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) {
461 dst8 = (u8 *)dstaddr;
462 while (len) {
463 *dst8 = ioread8(address);
464 address++;
465 dst8++;
466 len--;
467 }
468 } else {
469 len = len / 2;
470 dst16 = (__le16 *)dstaddr;
471 while (len) {
472 *dst16 = cpu_to_le16(ioread16(address));
473 address += 2;
474 dst16++;
475 len--;
476 }
477 }
478 } else {
479 len = len / 4;
480 dst32 = (__le32 *)dstaddr;
481 while (len) {
482 *dst32 = cpu_to_le32(ioread32(address));
483 address += 4;
484 dst32++;
485 len--;
486 }
487 }
488 }
489
490
491 #define WRITECC32(devinfo, reg, value) brcmf_pcie_write_reg32(devinfo, \
492 CHIPCREGOFFS(reg), value)
493
494
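/* Point the BAR0 window at the register space of the requested backplane
 * core, re-writing the window register if the read-back does not match.
 */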
495 static void
496 brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid)
497 {
498 const struct pci_dev *pdev = devinfo->pdev;
499 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
500 struct brcmf_core *core;
501 u32 bar0_win;
502
503 core = brcmf_chip_get_core(devinfo->ci, coreid);
504 if (core) {
505 bar0_win = core->base;
506 pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, bar0_win);
507 if (pci_read_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW,
508 &bar0_win) == 0) {
509 if (bar0_win != core->base) {
510 bar0_win = core->base;
511 pci_write_config_dword(pdev,
512 BRCMF_PCIE_BAR0_WINDOW,
513 bar0_win);
514 }
515 }
516 } else {
517 brcmf_err(bus, "Unsupported core selected %x\n", coreid);
518 }
519 }
520
521
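/* Reset the device: disable ASPM, trigger a chipcommon watchdog reset,
 * restore ASPM and, for PCIe core revisions <= 13, restore the config
 * space registers that are lost across the watchdog reset.
 */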
522 static void brcmf_pcie_reset_device(struct brcmf_pciedev_info *devinfo)
523 {
524 struct brcmf_core *core;
525 u16 cfg_offset[] = { BRCMF_PCIE_CFGREG_STATUS_CMD,
526 BRCMF_PCIE_CFGREG_PM_CSR,
527 BRCMF_PCIE_CFGREG_MSI_CAP,
528 BRCMF_PCIE_CFGREG_MSI_ADDR_L,
529 BRCMF_PCIE_CFGREG_MSI_ADDR_H,
530 BRCMF_PCIE_CFGREG_MSI_DATA,
531 BRCMF_PCIE_CFGREG_LINK_STATUS_CTRL2,
532 BRCMF_PCIE_CFGREG_RBAR_CTRL,
533 BRCMF_PCIE_CFGREG_PML1_SUB_CTRL1,
534 BRCMF_PCIE_CFGREG_REG_BAR2_CONFIG,
535 BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG };
536 u32 i;
537 u32 val;
538 u32 lsc;
539
540 if (!devinfo->ci)
541 return;
542
543 /* Disable ASPM */
544 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
545 pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
546 &lsc);
547 val = lsc & (~BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
548 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
549 val);
550
551 /* Watchdog reset */
552 brcmf_pcie_select_core(devinfo, BCMA_CORE_CHIPCOMMON);
553 WRITECC32(devinfo, watchdog, 4);
554 msleep(100);
555
556 /* Restore ASPM */
557 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
558 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_LINK_STATUS_CTRL,
559 lsc);
560
561 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
562 if (core->rev <= 13) {
563 for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
564 brcmf_pcie_write_reg32(devinfo,
565 BRCMF_PCIE_PCIE2REG_CONFIGADDR,
566 cfg_offset[i]);
567 val = brcmf_pcie_read_reg32(devinfo,
568 BRCMF_PCIE_PCIE2REG_CONFIGDATA);
569 brcmf_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
570 cfg_offset[i], val);
571 brcmf_pcie_write_reg32(devinfo,
572 BRCMF_PCIE_PCIE2REG_CONFIGDATA,
573 val);
574 }
575 }
576 }
577
578
579 static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
580 {
581 u32 config;
582
583 /* BAR1 window may not be sized properly */
584 brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
585 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
586 config = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA);
587 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGDATA, config);
588
589 device_wakeup_enable(&devinfo->pdev->dev);
590 }
591
592
593 static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo)
594 {
595 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
596 brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4);
597 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
598 5);
599 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
600 0);
601 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX,
602 7);
603 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKPDA,
604 0);
605 }
606 return 0;
607 }
608
609
610 static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo,
611 u32 resetintr)
612 {
613 struct brcmf_core *core;
614
615 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) {
616 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_INTERNAL_MEM);
617 brcmf_chip_resetcore(core, 0, 0, 0);
618 }
619
620 if (!brcmf_chip_set_active(devinfo->ci, resetintr))
621 return -EINVAL;
622 return 0;
623 }
624
625
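/* Write host-to-device mailbox data into TCM (waiting for any pending
 * value to be consumed first) and ring the SB mailbox doorbell.
 */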
626 static int
627 brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
628 {
629 struct brcmf_pcie_shared_info *shared;
630 struct brcmf_core *core;
631 u32 addr;
632 u32 cur_htod_mb_data;
633 u32 i;
634
635 shared = &devinfo->shared;
636 addr = shared->htod_mb_data_addr;
637 cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
638
639 if (cur_htod_mb_data != 0)
640 brcmf_dbg(PCIE, "MB transaction is already pending 0x%04x\n",
641 cur_htod_mb_data);
642
643 i = 0;
644 while (cur_htod_mb_data != 0) {
645 msleep(10);
646 i++;
647 if (i > 100)
648 return -EIO;
649 cur_htod_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
650 }
651
652 brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
653 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
654
655 /* Send mailbox interrupt twice as a hardware workaround */
656 core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
657 if (core->rev <= 13)
658 pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
659
660 return 0;
661 }
662
663
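/* Read and clear device-to-host mailbox data and handle deep sleep
 * requests, D3 acknowledgements and firmware halt notifications.
 */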
664 static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
665 {
666 struct brcmf_pcie_shared_info *shared;
667 u32 addr;
668 u32 dtoh_mb_data;
669
670 shared = &devinfo->shared;
671 addr = shared->dtoh_mb_data_addr;
672 dtoh_mb_data = brcmf_pcie_read_tcm32(devinfo, addr);
673
674 if (!dtoh_mb_data)
675 return;
676
677 brcmf_pcie_write_tcm32(devinfo, addr, 0);
678
679 brcmf_dbg(PCIE, "D2H_MB_DATA: 0x%04x\n", dtoh_mb_data);
680 if (dtoh_mb_data & BRCMF_D2H_DEV_DS_ENTER_REQ) {
681 brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP REQ\n");
682 brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_DS_ACK);
683 brcmf_dbg(PCIE, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
684 }
685 if (dtoh_mb_data & BRCMF_D2H_DEV_DS_EXIT_NOTE)
686 brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
687 if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) {
688 brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n");
689 devinfo->mbdata_completed = true;
690 wake_up(&devinfo->mbdata_resp_wait);
691 }
692 if (dtoh_mb_data & BRCMF_D2H_DEV_FWHALT) {
693 brcmf_dbg(PCIE, "D2H_MB_DATA: FW HALT\n");
694 brcmf_fw_crashed(&devinfo->pdev->dev);
695 }
696 }
697
698
699 static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
700 {
701 struct brcmf_pcie_shared_info *shared;
702 struct brcmf_pcie_console *console;
703 u32 addr;
704
705 shared = &devinfo->shared;
706 console = &shared->console;
707 addr = shared->tcm_base_address + BRCMF_SHARED_CONSOLE_ADDR_OFFSET;
708 console->base_addr = brcmf_pcie_read_tcm32(devinfo, addr);
709
710 addr = console->base_addr + BRCMF_CONSOLE_BUFADDR_OFFSET;
711 console->buf_addr = brcmf_pcie_read_tcm32(devinfo, addr);
712 addr = console->base_addr + BRCMF_CONSOLE_BUFSIZE_OFFSET;
713 console->bufsize = brcmf_pcie_read_tcm32(devinfo, addr);
714
715 brcmf_dbg(FWCON, "Console: base %x, buf %x, size %d\n",
716 console->base_addr, console->buf_addr, console->bufsize);
717 }
718
719 /**
720 * brcmf_pcie_bus_console_read - reads firmware messages
721 *
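* @devinfo: pointer to the PCIe device info structure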
722 * @error: specifies if error has occurred (prints messages unconditionally)
723 */
724 static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo,
725 bool error)
726 {
727 struct pci_dev *pdev = devinfo->pdev;
728 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
729 struct brcmf_pcie_console *console;
730 u32 addr;
731 u8 ch;
732 u32 newidx;
733
734 if (!error && !BRCMF_FWCON_ON())
735 return;
736
737 console = &devinfo->shared.console;
738 addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
739 newidx = brcmf_pcie_read_tcm32(devinfo, addr);
740 while (newidx != console->read_idx) {
741 addr = console->buf_addr + console->read_idx;
742 ch = brcmf_pcie_read_tcm8(devinfo, addr);
743 console->read_idx++;
744 if (console->read_idx == console->bufsize)
745 console->read_idx = 0;
746 if (ch == '\r')
747 continue;
748 console->log_str[console->log_idx] = ch;
749 console->log_idx++;
750 if ((ch != '\n') &&
751 (console->log_idx == (sizeof(console->log_str) - 2))) {
752 ch = '\n';
753 console->log_str[console->log_idx] = ch;
754 console->log_idx++;
755 }
756 if (ch == '\n') {
757 console->log_str[console->log_idx] = 0;
758 if (error)
759 __brcmf_err(bus, __func__, "CONSOLE: %s",
760 console->log_str);
761 else
762 pr_debug("CONSOLE: %s", console->log_str);
763 console->log_idx = 0;
764 }
765 }
766 }
767
768
769 static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo)
770 {
771 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 0);
772 }
773
774
775 static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo)
776 {
777 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK,
778 BRCMF_PCIE_MB_INT_D2H_DB |
779 BRCMF_PCIE_MB_INT_FN0_0 |
780 BRCMF_PCIE_MB_INT_FN0_1);
781 }
782
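/* Signal "host ready" by ringing H2D doorbell 1, if the firmware
 * advertised support for it in the shared flags.
 */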
783 static void brcmf_pcie_hostready(struct brcmf_pciedev_info *devinfo)
784 {
785 if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1)
786 brcmf_pcie_write_reg32(devinfo,
787 BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1, 1);
788 }
789
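/* Threaded interrupt handling: the quick check handler only tests the
 * mailbox interrupt status and masks interrupts; the thread handler
 * clears the status, services mailbox data and doorbells, and re-enables
 * interrupts while the bus is up.
 */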
790 static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg)
791 {
792 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
793
794 if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT)) {
795 brcmf_pcie_intr_disable(devinfo);
796 brcmf_dbg(PCIE, "Enter\n");
797 return IRQ_WAKE_THREAD;
798 }
799 return IRQ_NONE;
800 }
801
802
803 static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
804 {
805 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg;
806 u32 status;
807
808 devinfo->in_irq = true;
809 status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
810 brcmf_dbg(PCIE, "Enter %x\n", status);
811 if (status) {
812 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
813 status);
814 if (status & (BRCMF_PCIE_MB_INT_FN0_0 |
815 BRCMF_PCIE_MB_INT_FN0_1))
816 brcmf_pcie_handle_mb_data(devinfo);
817 if (status & BRCMF_PCIE_MB_INT_D2H_DB) {
818 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
819 brcmf_proto_msgbuf_rx_trigger(
820 &devinfo->pdev->dev);
821 }
822 }
823 brcmf_pcie_bus_console_read(devinfo, false);
824 if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
825 brcmf_pcie_intr_enable(devinfo);
826 devinfo->in_irq = false;
827 return IRQ_HANDLED;
828 }
829
830
831 static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo)
832 {
833 struct pci_dev *pdev = devinfo->pdev;
834 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
835
836 brcmf_pcie_intr_disable(devinfo);
837
838 brcmf_dbg(PCIE, "Enter\n");
839
840 pci_enable_msi(pdev);
841 if (request_threaded_irq(pdev->irq, brcmf_pcie_quick_check_isr,
842 brcmf_pcie_isr_thread, IRQF_SHARED,
843 "brcmf_pcie_intr", devinfo)) {
844 pci_disable_msi(pdev);
845 brcmf_err(bus, "Failed to request IRQ %d\n", pdev->irq);
846 return -EIO;
847 }
848 devinfo->irq_allocated = true;
849 return 0;
850 }
851
852
853 static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo)
854 {
855 struct pci_dev *pdev = devinfo->pdev;
856 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
857 u32 status;
858 u32 count;
859
860 if (!devinfo->irq_allocated)
861 return;
862
863 brcmf_pcie_intr_disable(devinfo);
864 free_irq(pdev->irq, devinfo);
865 pci_disable_msi(pdev);
866
867 msleep(50);
868 count = 0;
869 while ((devinfo->in_irq) && (count < 20)) {
870 msleep(50);
871 count++;
872 }
873 if (devinfo->in_irq)
874 brcmf_err(bus, "Still in IRQ (processing) !!!\n");
875
876 status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
877 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, status);
878
879 devinfo->irq_allocated = false;
880 }
881
882
883 static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
884 {
885 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
886 struct brcmf_pciedev_info *devinfo = ring->devinfo;
887 struct brcmf_commonring *commonring = &ring->commonring;
888
889 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
890 return -EIO;
891
892 brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
893 commonring->w_ptr, ring->id);
894
895 devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
896
897 return 0;
898 }
899
900
901 static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
902 {
903 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
904 struct brcmf_pciedev_info *devinfo = ring->devinfo;
905 struct brcmf_commonring *commonring = &ring->commonring;
906
907 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
908 return -EIO;
909
910 brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
911 commonring->r_ptr, ring->id);
912
913 devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
914
915 return 0;
916 }
917
918
919 static int brcmf_pcie_ring_mb_ring_bell(void *ctx)
920 {
921 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
922 struct brcmf_pciedev_info *devinfo = ring->devinfo;
923
924 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
925 return -EIO;
926
927 brcmf_dbg(PCIE, "RING !\n");
928 /* Any arbitrary value will do, let's use 1 */
929 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0, 1);
930
931 return 0;
932 }
933
934
935 static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
936 {
937 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
938 struct brcmf_pciedev_info *devinfo = ring->devinfo;
939 struct brcmf_commonring *commonring = &ring->commonring;
940
941 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
942 return -EIO;
943
944 commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
945
946 brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
947 commonring->w_ptr, ring->id);
948
949 return 0;
950 }
951
952
953 static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
954 {
955 struct brcmf_pcie_ringbuf *ring = (struct brcmf_pcie_ringbuf *)ctx;
956 struct brcmf_pciedev_info *devinfo = ring->devinfo;
957 struct brcmf_commonring *commonring = &ring->commonring;
958
959 if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
960 return -EIO;
961
962 commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
963
964 brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
965 commonring->r_ptr, ring->id);
966
967 return 0;
968 }
969
970
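/* Allocate a coherent DMA buffer and write its 64-bit bus address into
 * device TCM at the given offset so the firmware can find it.
 */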
971 static void *
972 brcmf_pcie_init_dmabuffer_for_device(struct brcmf_pciedev_info *devinfo,
973 u32 size, u32 tcm_dma_phys_addr,
974 dma_addr_t *dma_handle)
975 {
976 void *ring;
977 u64 address;
978
979 ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle,
980 GFP_KERNEL);
981 if (!ring)
982 return NULL;
983
984 address = (u64)*dma_handle;
985 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr,
986 address & 0xffffffff);
987 brcmf_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32);
988
989 return (ring);
990 }
991
992
993 static struct brcmf_pcie_ringbuf *
994 brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id,
995 u32 tcm_ring_phys_addr)
996 {
997 void *dma_buf;
998 dma_addr_t dma_handle;
999 struct brcmf_pcie_ringbuf *ring;
1000 u32 size;
1001 u32 addr;
1002 const u32 *ring_itemsize_array;
1003
1004 if (devinfo->shared.version < BRCMF_PCIE_SHARED_VERSION_7)
1005 ring_itemsize_array = brcmf_ring_itemsize_pre_v7;
1006 else
1007 ring_itemsize_array = brcmf_ring_itemsize;
1008
1009 size = brcmf_ring_max_item[ring_id] * ring_itemsize_array[ring_id];
1010 dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size,
1011 tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET,
1012 &dma_handle);
1013 if (!dma_buf)
1014 return NULL;
1015
1016 addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET;
1017 brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]);
1018 addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET;
1019 brcmf_pcie_write_tcm16(devinfo, addr, ring_itemsize_array[ring_id]);
1020
1021 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1022 if (!ring) {
1023 dma_free_coherent(&devinfo->pdev->dev, size, dma_buf,
1024 dma_handle);
1025 return NULL;
1026 }
1027 brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id],
1028 ring_itemsize_array[ring_id], dma_buf);
1029 ring->dma_handle = dma_handle;
1030 ring->devinfo = devinfo;
1031 brcmf_commonring_register_cb(&ring->commonring,
1032 brcmf_pcie_ring_mb_ring_bell,
1033 brcmf_pcie_ring_mb_update_rptr,
1034 brcmf_pcie_ring_mb_update_wptr,
1035 brcmf_pcie_ring_mb_write_rptr,
1036 brcmf_pcie_ring_mb_write_wptr, ring);
1037
1038 return (ring);
1039 }
1040
1041
1042 static void brcmf_pcie_release_ringbuffer(struct device *dev,
1043 struct brcmf_pcie_ringbuf *ring)
1044 {
1045 void *dma_buf;
1046 u32 size;
1047
1048 if (!ring)
1049 return;
1050
1051 dma_buf = ring->commonring.buf_addr;
1052 if (dma_buf) {
1053 size = ring->commonring.depth * ring->commonring.item_len;
1054 dma_free_coherent(dev, size, dma_buf, ring->dma_handle);
1055 }
1056 kfree(ring);
1057 }
1058
1059
1060 static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
1061 {
1062 u32 i;
1063
1064 for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
1065 brcmf_pcie_release_ringbuffer(&devinfo->pdev->dev,
1066 devinfo->shared.commonrings[i]);
1067 devinfo->shared.commonrings[i] = NULL;
1068 }
1069 kfree(devinfo->shared.flowrings);
1070 devinfo->shared.flowrings = NULL;
1071 if (devinfo->idxbuf) {
1072 dma_free_coherent(&devinfo->pdev->dev,
1073 devinfo->idxbuf_sz,
1074 devinfo->idxbuf,
1075 devinfo->idxbuf_dmahandle);
1076 devinfo->idxbuf = NULL;
1077 }
1078 }
1079
1080
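/* Read the ring info structure from shared TCM, optionally switch to
 * host memory (DMA) ring indices, and allocate the common message rings
 * and the flow ring bookkeeping.
 */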
1081 static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
1082 {
1083 struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1084 struct brcmf_pcie_ringbuf *ring;
1085 struct brcmf_pcie_ringbuf *rings;
1086 u32 d2h_w_idx_ptr;
1087 u32 d2h_r_idx_ptr;
1088 u32 h2d_w_idx_ptr;
1089 u32 h2d_r_idx_ptr;
1090 u32 ring_mem_ptr;
1091 u32 i;
1092 u64 address;
1093 u32 bufsz;
1094 u8 idx_offset;
1095 struct brcmf_pcie_dhi_ringinfo ringinfo;
1096 u16 max_flowrings;
1097 u16 max_submissionrings;
1098 u16 max_completionrings;
1099
1100 memcpy_fromio(&ringinfo, devinfo->tcm + devinfo->shared.ring_info_addr,
1101 sizeof(ringinfo));
1102 if (devinfo->shared.version >= 6) {
1103 max_submissionrings = le16_to_cpu(ringinfo.max_submissionrings);
1104 max_flowrings = le16_to_cpu(ringinfo.max_flowrings);
1105 max_completionrings = le16_to_cpu(ringinfo.max_completionrings);
1106 } else {
1107 max_submissionrings = le16_to_cpu(ringinfo.max_flowrings);
1108 max_flowrings = max_submissionrings -
1109 BRCMF_NROF_H2D_COMMON_MSGRINGS;
1110 max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
1111 }
1112
1113 if (devinfo->dma_idx_sz != 0) {
1114 bufsz = (max_submissionrings + max_completionrings) *
1115 devinfo->dma_idx_sz * 2;
1116 devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
1117 &devinfo->idxbuf_dmahandle,
1118 GFP_KERNEL);
1119 if (!devinfo->idxbuf)
1120 devinfo->dma_idx_sz = 0;
1121 }
1122
1123 if (devinfo->dma_idx_sz == 0) {
1124 d2h_w_idx_ptr = le32_to_cpu(ringinfo.d2h_w_idx_ptr);
1125 d2h_r_idx_ptr = le32_to_cpu(ringinfo.d2h_r_idx_ptr);
1126 h2d_w_idx_ptr = le32_to_cpu(ringinfo.h2d_w_idx_ptr);
1127 h2d_r_idx_ptr = le32_to_cpu(ringinfo.h2d_r_idx_ptr);
1128 idx_offset = sizeof(u32);
1129 devinfo->write_ptr = brcmf_pcie_write_tcm16;
1130 devinfo->read_ptr = brcmf_pcie_read_tcm16;
1131 brcmf_dbg(PCIE, "Using TCM indices\n");
1132 } else {
1133 memset(devinfo->idxbuf, 0, bufsz);
1134 devinfo->idxbuf_sz = bufsz;
1135 idx_offset = devinfo->dma_idx_sz;
1136 devinfo->write_ptr = brcmf_pcie_write_idx;
1137 devinfo->read_ptr = brcmf_pcie_read_idx;
1138
1139 h2d_w_idx_ptr = 0;
1140 address = (u64)devinfo->idxbuf_dmahandle;
1141 ringinfo.h2d_w_idx_hostaddr.low_addr =
1142 cpu_to_le32(address & 0xffffffff);
1143 ringinfo.h2d_w_idx_hostaddr.high_addr =
1144 cpu_to_le32(address >> 32);
1145
1146 h2d_r_idx_ptr = h2d_w_idx_ptr +
1147 max_submissionrings * idx_offset;
1148 address += max_submissionrings * idx_offset;
1149 ringinfo.h2d_r_idx_hostaddr.low_addr =
1150 cpu_to_le32(address & 0xffffffff);
1151 ringinfo.h2d_r_idx_hostaddr.high_addr =
1152 cpu_to_le32(address >> 32);
1153
1154 d2h_w_idx_ptr = h2d_r_idx_ptr +
1155 max_submissionrings * idx_offset;
1156 address += max_submissionrings * idx_offset;
1157 ringinfo.d2h_w_idx_hostaddr.low_addr =
1158 cpu_to_le32(address & 0xffffffff);
1159 ringinfo.d2h_w_idx_hostaddr.high_addr =
1160 cpu_to_le32(address >> 32);
1161
1162 d2h_r_idx_ptr = d2h_w_idx_ptr +
1163 max_completionrings * idx_offset;
1164 address += max_completionrings * idx_offset;
1165 ringinfo.d2h_r_idx_hostaddr.low_addr =
1166 cpu_to_le32(address & 0xffffffff);
1167 ringinfo.d2h_r_idx_hostaddr.high_addr =
1168 cpu_to_le32(address >> 32);
1169
1170 memcpy_toio(devinfo->tcm + devinfo->shared.ring_info_addr,
1171 &ringinfo, sizeof(ringinfo));
1172 brcmf_dbg(PCIE, "Using host memory indices\n");
1173 }
1174
1175 ring_mem_ptr = le32_to_cpu(ringinfo.ringmem);
1176
1177 for (i = 0; i < BRCMF_NROF_H2D_COMMON_MSGRINGS; i++) {
1178 ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
1179 if (!ring)
1180 goto fail;
1181 ring->w_idx_addr = h2d_w_idx_ptr;
1182 ring->r_idx_addr = h2d_r_idx_ptr;
1183 ring->id = i;
1184 devinfo->shared.commonrings[i] = ring;
1185
1186 h2d_w_idx_ptr += idx_offset;
1187 h2d_r_idx_ptr += idx_offset;
1188 ring_mem_ptr += BRCMF_RING_MEM_SZ;
1189 }
1190
1191 for (i = BRCMF_NROF_H2D_COMMON_MSGRINGS;
1192 i < BRCMF_NROF_COMMON_MSGRINGS; i++) {
1193 ring = brcmf_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr);
1194 if (!ring)
1195 goto fail;
1196 ring->w_idx_addr = d2h_w_idx_ptr;
1197 ring->r_idx_addr = d2h_r_idx_ptr;
1198 ring->id = i;
1199 devinfo->shared.commonrings[i] = ring;
1200
1201 d2h_w_idx_ptr += idx_offset;
1202 d2h_r_idx_ptr += idx_offset;
1203 ring_mem_ptr += BRCMF_RING_MEM_SZ;
1204 }
1205
1206 devinfo->shared.max_flowrings = max_flowrings;
1207 devinfo->shared.max_submissionrings = max_submissionrings;
1208 devinfo->shared.max_completionrings = max_completionrings;
1209 rings = kcalloc(max_flowrings, sizeof(*ring), GFP_KERNEL);
1210 if (!rings)
1211 goto fail;
1212
1213 brcmf_dbg(PCIE, "Nr of flowrings is %d\n", max_flowrings);
1214
1215 for (i = 0; i < max_flowrings; i++) {
1216 ring = &rings[i];
1217 ring->devinfo = devinfo;
1218 ring->id = i + BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
1219 brcmf_commonring_register_cb(&ring->commonring,
1220 brcmf_pcie_ring_mb_ring_bell,
1221 brcmf_pcie_ring_mb_update_rptr,
1222 brcmf_pcie_ring_mb_update_wptr,
1223 brcmf_pcie_ring_mb_write_rptr,
1224 brcmf_pcie_ring_mb_write_wptr,
1225 ring);
1226 ring->w_idx_addr = h2d_w_idx_ptr;
1227 ring->r_idx_addr = h2d_r_idx_ptr;
1228 h2d_w_idx_ptr += idx_offset;
1229 h2d_r_idx_ptr += idx_offset;
1230 }
1231 devinfo->shared.flowrings = rings;
1232
1233 return 0;
1234
1235 fail:
1236 brcmf_err(bus, "Allocating ring buffers failed\n");
1237 brcmf_pcie_release_ringbuffers(devinfo);
1238 return -ENOMEM;
1239 }
1240
1241
1242 static void
1243 brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1244 {
1245 if (devinfo->shared.scratch)
1246 dma_free_coherent(&devinfo->pdev->dev,
1247 BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1248 devinfo->shared.scratch,
1249 devinfo->shared.scratch_dmahandle);
1250 if (devinfo->shared.ringupd)
1251 dma_free_coherent(&devinfo->pdev->dev,
1252 BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1253 devinfo->shared.ringupd,
1254 devinfo->shared.ringupd_dmahandle);
1255 }
1256
1257 static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1258 {
1259 struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1260 u64 address;
1261 u32 addr;
1262
1263 devinfo->shared.scratch =
1264 dma_alloc_coherent(&devinfo->pdev->dev,
1265 BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1266 &devinfo->shared.scratch_dmahandle,
1267 GFP_KERNEL);
1268 if (!devinfo->shared.scratch)
1269 goto fail;
1270
1271 addr = devinfo->shared.tcm_base_address +
1272 BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
1273 address = (u64)devinfo->shared.scratch_dmahandle;
1274 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1275 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1276 addr = devinfo->shared.tcm_base_address +
1277 BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
1278 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
1279
1280 devinfo->shared.ringupd =
1281 dma_alloc_coherent(&devinfo->pdev->dev,
1282 BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1283 &devinfo->shared.ringupd_dmahandle,
1284 GFP_KERNEL);
1285 if (!devinfo->shared.ringupd)
1286 goto fail;
1287
1288 addr = devinfo->shared.tcm_base_address +
1289 BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
1290 address = (u64)devinfo->shared.ringupd_dmahandle;
1291 brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
1292 brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
1293 addr = devinfo->shared.tcm_base_address +
1294 BRCMF_SHARED_DMA_RINGUPD_LEN_OFFSET;
1295 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
1296 return 0;
1297
1298 fail:
1299 brcmf_err(bus, "Allocating scratch buffers failed\n");
1300 brcmf_pcie_release_scratchbuffers(devinfo);
1301 return -ENOMEM;
1302 }
1303
1304
1305 static void brcmf_pcie_down(struct device *dev)
1306 {
1307 }
1308
1309 static int brcmf_pcie_preinit(struct device *dev)
1310 {
1311 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1312 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1313
1314 brcmf_dbg(PCIE, "Enter\n");
1315
1316 brcmf_pcie_intr_enable(buspub->devinfo);
1317 brcmf_pcie_hostready(buspub->devinfo);
1318
1319 return 0;
1320 }
1321
1322 static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
1323 {
1324 return 0;
1325 }
1326
1327
1328 static int brcmf_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg,
1329 uint len)
1330 {
1331 return 0;
1332 }
1333
1334
1335 static int brcmf_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg,
1336 uint len)
1337 {
1338 return 0;
1339 }
1340
1341
1342 static void brcmf_pcie_wowl_config(struct device *dev, bool enabled)
1343 {
1344 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1345 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1346 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1347
1348 brcmf_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled);
1349 devinfo->wowl_enabled = enabled;
1350 }
1351
1352
1353 static size_t brcmf_pcie_get_ramsize(struct device *dev)
1354 {
1355 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1356 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1357 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1358
1359 return devinfo->ci->ramsize - devinfo->ci->srsize;
1360 }
1361
1362
1363 static int brcmf_pcie_get_memdump(struct device *dev, void *data, size_t len)
1364 {
1365 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1366 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1367 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1368
1369 brcmf_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len);
1370 brcmf_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len);
1371 return 0;
1372 }
1373
1374 static
1375 int brcmf_pcie_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
1376 {
1377 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1378 struct brcmf_fw_request *fwreq;
1379 struct brcmf_fw_name fwnames[] = {
1380 { ext, fw_name },
1381 };
1382
1383 fwreq = brcmf_fw_alloc_request(bus_if->chip, bus_if->chiprev,
1384 brcmf_pcie_fwnames,
1385 ARRAY_SIZE(brcmf_pcie_fwnames),
1386 fwnames, ARRAY_SIZE(fwnames));
1387 if (!fwreq)
1388 return -ENOMEM;
1389
1390 kfree(fwreq);
1391 return 0;
1392 }
1393
1394 static int brcmf_pcie_reset(struct device *dev)
1395 {
1396 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1397 struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
1398 struct brcmf_pciedev_info *devinfo = buspub->devinfo;
1399 struct brcmf_fw_request *fwreq;
1400 int err;
1401
1402 brcmf_pcie_intr_disable(devinfo);
1403
1404 brcmf_pcie_bus_console_read(devinfo, true);
1405
1406 brcmf_detach(dev);
1407
1408 brcmf_pcie_release_irq(devinfo);
1409 brcmf_pcie_release_scratchbuffers(devinfo);
1410 brcmf_pcie_release_ringbuffers(devinfo);
1411 brcmf_pcie_reset_device(devinfo);
1412
1413 fwreq = brcmf_pcie_prepare_fw_request(devinfo);
1414 if (!fwreq) {
1415 dev_err(dev, "Failed to prepare FW request\n");
1416 return -ENOMEM;
1417 }
1418
1419 err = brcmf_fw_get_firmwares(dev, fwreq, brcmf_pcie_setup);
1420 if (err) {
1421 dev_err(dev, "Failed to prepare FW request\n");
1422 kfree(fwreq);
1423 }
1424
1425 return err;
1426 }
1427
1428 static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
1429 .preinit = brcmf_pcie_preinit,
1430 .txdata = brcmf_pcie_tx,
1431 .stop = brcmf_pcie_down,
1432 .txctl = brcmf_pcie_tx_ctlpkt,
1433 .rxctl = brcmf_pcie_rx_ctlpkt,
1434 .wowl_config = brcmf_pcie_wowl_config,
1435 .get_ramsize = brcmf_pcie_get_ramsize,
1436 .get_memdump = brcmf_pcie_get_memdump,
1437 .get_fwname = brcmf_pcie_get_fwname,
1438 .reset = brcmf_pcie_reset,
1439 };
1440
1441
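/* Some firmware images carry the usable RAM size behind the "SMAR" magic
 * at a fixed offset; when found, it overrides the default chip RAM size.
 */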
1442 static void
1443 brcmf_pcie_adjust_ramsize(struct brcmf_pciedev_info *devinfo, u8 *data,
1444 u32 data_len)
1445 {
1446 __le32 *field;
1447 u32 newsize;
1448
1449 if (data_len < BRCMF_RAMSIZE_OFFSET + 8)
1450 return;
1451
1452 field = (__le32 *)&data[BRCMF_RAMSIZE_OFFSET];
1453 if (le32_to_cpup(field) != BRCMF_RAMSIZE_MAGIC)
1454 return;
1455 field++;
1456 newsize = le32_to_cpup(field);
1457
1458 brcmf_dbg(PCIE, "Found ramsize info in FW, adjusting to 0x%x\n",
1459 newsize);
1460 devinfo->ci->ramsize = newsize;
1461 }
1462
1463
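/* Parse the shared structure the firmware publishes at the address
 * written to the last word of RAM: version/flags, mailbox and ring info
 * addresses, rx parameters and the firmware console.
 */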
1464 static int
1465 brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
1466 u32 sharedram_addr)
1467 {
1468 struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1469 struct brcmf_pcie_shared_info *shared;
1470 u32 addr;
1471
1472 shared = &devinfo->shared;
1473 shared->tcm_base_address = sharedram_addr;
1474
1475 shared->flags = brcmf_pcie_read_tcm32(devinfo, sharedram_addr);
1476 shared->version = (u8)(shared->flags & BRCMF_PCIE_SHARED_VERSION_MASK);
1477 brcmf_dbg(PCIE, "PCIe protocol version %d\n", shared->version);
1478 if ((shared->version > BRCMF_PCIE_MAX_SHARED_VERSION) ||
1479 (shared->version < BRCMF_PCIE_MIN_SHARED_VERSION)) {
1480 brcmf_err(bus, "Unsupported PCIE version %d\n",
1481 shared->version);
1482 return -EINVAL;
1483 }
1484
1485 /* check whether firmware supports DMA indices */
1486 if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
1487 if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
1488 devinfo->dma_idx_sz = sizeof(u16);
1489 else
1490 devinfo->dma_idx_sz = sizeof(u32);
1491 }
1492
1493 addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
1494 shared->max_rxbufpost = brcmf_pcie_read_tcm16(devinfo, addr);
1495 if (shared->max_rxbufpost == 0)
1496 shared->max_rxbufpost = BRCMF_DEF_MAX_RXBUFPOST;
1497
1498 addr = sharedram_addr + BRCMF_SHARED_RX_DATAOFFSET_OFFSET;
1499 shared->rx_dataoffset = brcmf_pcie_read_tcm32(devinfo, addr);
1500
1501 addr = sharedram_addr + BRCMF_SHARED_HTOD_MB_DATA_ADDR_OFFSET;
1502 shared->htod_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1503
1504 addr = sharedram_addr + BRCMF_SHARED_DTOH_MB_DATA_ADDR_OFFSET;
1505 shared->dtoh_mb_data_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1506
1507 addr = sharedram_addr + BRCMF_SHARED_RING_INFO_ADDR_OFFSET;
1508 shared->ring_info_addr = brcmf_pcie_read_tcm32(devinfo, addr);
1509
1510 brcmf_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n",
1511 shared->max_rxbufpost, shared->rx_dataoffset);
1512
1513 brcmf_pcie_bus_console_init(devinfo);
1514
1515 return 0;
1516 }
1517
1518
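/* Halt the ARM, copy the firmware image (and NVRAM at the end of RAM)
 * into TCM, restart the ARM and poll the last word of RAM until the
 * firmware overwrites it with the shared structure address.
 */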
1519 static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
1520 const struct firmware *fw, void *nvram,
1521 u32 nvram_len)
1522 {
1523 struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
1524 u32 sharedram_addr;
1525 u32 sharedram_addr_written;
1526 u32 loop_counter;
1527 int err;
1528 u32 address;
1529 u32 resetintr;
1530
1531 brcmf_dbg(PCIE, "Halt ARM.\n");
1532 err = brcmf_pcie_enter_download_state(devinfo);
1533 if (err)
1534 return err;
1535
1536 brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
1537 memcpy_toio(devinfo->tcm + devinfo->ci->rambase,
1538 (void *)fw->data, fw->size);
1539
1540 resetintr = get_unaligned_le32(fw->data);
1541 release_firmware(fw);
1542
1543 /* Reset the last 4 bytes of RAM, to be used for the shared
1544 * area. This location identifies when FW is running.
1545 */
1546 brcmf_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, 0);
1547
1548 if (nvram) {
1549 brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
1550 address = devinfo->ci->rambase + devinfo->ci->ramsize -
1551 nvram_len;
1552 memcpy_toio(devinfo->tcm + address, nvram, nvram_len);
1553 brcmf_fw_nvram_free(nvram);
1554 } else {
1555 brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
1556 devinfo->nvram_name);
1557 }
1558
1559 sharedram_addr_written = brcmf_pcie_read_ram32(devinfo,
1560 devinfo->ci->ramsize -
1561 4);
1562 brcmf_dbg(PCIE, "Bring ARM in running state\n");
1563 err = brcmf_pcie_exit_download_state(devinfo, resetintr);
1564 if (err)
1565 return err;
1566
1567 brcmf_dbg(PCIE, "Wait for FW init\n");
1568 sharedram_addr = sharedram_addr_written;
1569 loop_counter = BRCMF_PCIE_FW_UP_TIMEOUT / 50;
1570 while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) {
1571 msleep(50);
1572 sharedram_addr = brcmf_pcie_read_ram32(devinfo,
1573 devinfo->ci->ramsize -
1574 4);
1575 loop_counter--;
1576 }
1577 if (sharedram_addr == sharedram_addr_written) {
1578 brcmf_err(bus, "FW failed to initialize\n");
1579 return -ENODEV;
1580 }
1581 if (sharedram_addr < devinfo->ci->rambase ||
1582 sharedram_addr >= devinfo->ci->rambase + devinfo->ci->ramsize) {
1583 brcmf_err(bus, "Invalid shared RAM address 0x%08x\n",
1584 sharedram_addr);
1585 return -ENODEV;
1586 }
1587 brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr);
1588
1589 return (brcmf_pcie_init_share_ram_info(devinfo, sharedram_addr));
1590 }
1591
1592
1593 static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo)
1594 {
1595 struct pci_dev *pdev = devinfo->pdev;
1596 struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
1597 int err;
1598 phys_addr_t bar0_addr, bar1_addr;
1599 ulong bar1_size;
1600
1601 err = pci_enable_device(pdev);
1602 if (err) {
1603 brcmf_err(bus, "pci_enable_device failed err=%d\n", err);
1604 return err;
1605 }
1606
1607 pci_set_master(pdev);
1608
1609 /* Bar-0 mapped address */
1610 bar0_addr = pci_resource_start(pdev, 0);
1611 /* Bar-1 mapped address */
1612 bar1_addr = pci_resource_start(pdev, 2);
1613 /* read Bar-1 mapped memory range */
1614 bar1_size = pci_resource_len(pdev, 2);
1615 if ((bar1_size == 0) || (bar1_addr == 0)) {
1616 brcmf_err(bus, "BAR1 Not enabled, device size=%ld, addr=%#016llx\n",
1617 bar1_size, (unsigned long long)bar1_addr);
1618 return -EINVAL;
1619 }
1620
1621 devinfo->regs = ioremap(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE);
1622 devinfo->tcm = ioremap(bar1_addr, bar1_size);
1623
1624 if (!devinfo->regs || !devinfo->tcm) {
1625 brcmf_err(bus, "ioremap() failed (%p,%p)\n", devinfo->regs,
1626 devinfo->tcm);
1627 return -EINVAL;
1628 }
1629 brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n",
1630 devinfo->regs, (unsigned long long)bar0_addr);
1631 brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx size 0x%x\n",
1632 devinfo->tcm, (unsigned long long)bar1_addr,
1633 (unsigned int)bar1_size);
1634
1635 return 0;
1636 }
1637
1638
1639 static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo)
1640 {
1641 if (devinfo->tcm)
1642 iounmap(devinfo->tcm);
1643 if (devinfo->regs)
1644 iounmap(devinfo->regs);
1645
1646 pci_disable_device(devinfo->pdev);
1647 }
1648
1649
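/* Split a backplane address into the BAR0 window base (programmed into
 * config space) and the offset within the 4 KiB window.
 */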
1650 static u32 brcmf_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr)
1651 {
1652 u32 ret_addr;
1653
1654 ret_addr = addr & (BRCMF_PCIE_BAR0_REG_SIZE - 1);
1655 addr &= ~(BRCMF_PCIE_BAR0_REG_SIZE - 1);
1656 pci_write_config_dword(pdev, BRCMF_PCIE_BAR0_WINDOW, addr);
1657
1658 return ret_addr;
1659 }
1660
1661
1662 static u32 brcmf_pcie_buscore_read32(void *ctx, u32 addr)
1663 {
1664 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1665
1666 addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
1667 return brcmf_pcie_read_reg32(devinfo, addr);
1668 }
1669
1670
1671 static void brcmf_pcie_buscore_write32(void *ctx, u32 addr, u32 value)
1672 {
1673 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1674
1675 addr = brcmf_pcie_buscore_prep_addr(devinfo->pdev, addr);
1676 brcmf_pcie_write_reg32(devinfo, addr, value);
1677 }
1678
1679
1680 static int brcmf_pcie_buscoreprep(void *ctx)
1681 {
1682 return brcmf_pcie_get_resource(ctx);
1683 }
1684
1685
1686 static int brcmf_pcie_buscore_reset(void *ctx, struct brcmf_chip *chip)
1687 {
1688 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1689 u32 val;
1690
1691 devinfo->ci = chip;
1692 brcmf_pcie_reset_device(devinfo);
1693
1694 val = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT);
1695 if (val != 0xffffffff)
1696 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT,
1697 val);
1698
1699 return 0;
1700 }
1701
1702
1703 static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip,
1704 u32 rstvec)
1705 {
1706 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx;
1707
1708 brcmf_pcie_write_tcm32(devinfo, 0, rstvec);
1709 }
1710
1711
static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = {
	.prepare = brcmf_pcie_buscoreprep,
	.reset = brcmf_pcie_buscore_reset,
	.activate = brcmf_pcie_buscore_activate,
	.read32 = brcmf_pcie_buscore_read32,
	.write32 = brcmf_pcie_buscore_write32,
};

#define BRCMF_PCIE_FW_CODE	0
#define BRCMF_PCIE_FW_NVRAM	1

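/* Completion callback for brcmf_fw_get_firmwares(); @ret carries the result
 * of the firmware request. On success this downloads firmware and NVRAM to
 * the device, sets up the message rings and attaches the common driver
 * layers; on any failure the driver is released from the device.
 */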
static void brcmf_pcie_setup(struct device *dev, int ret,
			     struct brcmf_fw_request *fwreq)
{
	const struct firmware *fw;
	void *nvram;
	struct brcmf_bus *bus;
	struct brcmf_pciedev *pcie_bus_dev;
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_commonring **flowrings;
	u32 i, nvram_len;

	/* check firmware loading result */
	if (ret)
		goto fail;

	bus = dev_get_drvdata(dev);
	pcie_bus_dev = bus->bus_priv.pcie;
	devinfo = pcie_bus_dev->devinfo;
	brcmf_pcie_attach(devinfo);

	fw = fwreq->items[BRCMF_PCIE_FW_CODE].binary;
	nvram = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.data;
	nvram_len = fwreq->items[BRCMF_PCIE_FW_NVRAM].nv_data.len;
	kfree(fwreq);

	ret = brcmf_chip_get_raminfo(devinfo->ci);
	if (ret) {
		brcmf_err(bus, "Failed to get RAM info\n");
		release_firmware(fw);
		brcmf_fw_nvram_free(nvram);
		goto fail;
	}

	/* Some firmware images define the device memory size inside the
	 * firmware itself, because part of the device memory is shared and
	 * the division is determined by the firmware. Parse the image and
	 * adjust the chip memory size now.
	 */
	brcmf_pcie_adjust_ramsize(devinfo, (u8 *)fw->data, fw->size);

	ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len);
	if (ret)
		goto fail;

	devinfo->state = BRCMFMAC_PCIE_STATE_UP;

	ret = brcmf_pcie_init_ringbuffers(devinfo);
	if (ret)
		goto fail;

	ret = brcmf_pcie_init_scratchbuffers(devinfo);
	if (ret)
		goto fail;

	brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
	ret = brcmf_pcie_request_irq(devinfo);
	if (ret)
		goto fail;

	/* hook the commonrings in the bus structure. */
	for (i = 0; i < BRCMF_NROF_COMMON_MSGRINGS; i++)
		bus->msgbuf->commonrings[i] =
				&devinfo->shared.commonrings[i]->commonring;

	flowrings = kcalloc(devinfo->shared.max_flowrings, sizeof(*flowrings),
			    GFP_KERNEL);
	if (!flowrings)
		goto fail;

	for (i = 0; i < devinfo->shared.max_flowrings; i++)
		flowrings[i] = &devinfo->shared.flowrings[i].commonring;
	bus->msgbuf->flowrings = flowrings;

	bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset;
	bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost;
	bus->msgbuf->max_flowrings = devinfo->shared.max_flowrings;

	init_waitqueue_head(&devinfo->mbdata_resp_wait);

	ret = brcmf_attach(&devinfo->pdev->dev);
	if (ret)
		goto fail;

	brcmf_pcie_bus_console_read(devinfo, false);

	return;

fail:
	device_release_driver(dev);
}

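/* Build the firmware request for this chip: a .bin firmware image plus an
 * optional .txt NVRAM file, selected from brcmf_pcie_fwnames by chip id and
 * revision.
 */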
static struct brcmf_fw_request *
brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
{
	struct brcmf_fw_request *fwreq;
	struct brcmf_fw_name fwnames[] = {
		{ ".bin", devinfo->fw_name },
		{ ".txt", devinfo->nvram_name },
	};

	fwreq = brcmf_fw_alloc_request(devinfo->ci->chip, devinfo->ci->chiprev,
				       brcmf_pcie_fwnames,
				       ARRAY_SIZE(brcmf_pcie_fwnames),
				       fwnames, ARRAY_SIZE(fwnames));
	if (!fwreq)
		return NULL;

	fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
	fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
	fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
	fwreq->board_type = devinfo->settings->board_type;
	/* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
	fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
	fwreq->bus_nr = devinfo->pdev->bus->number;

	return fwreq;
}

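/* PCI probe: attach the chip, allocate the bus and msgbuf state, then request
 * the firmware files; the remainder of the bring-up happens in the
 * brcmf_pcie_setup() completion callback.
 */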
static int
brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;
	struct brcmf_fw_request *fwreq;
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_pciedev *pcie_bus_dev;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);

	ret = -ENOMEM;
	devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
	if (devinfo == NULL)
		return ret;

	devinfo->pdev = pdev;
	pcie_bus_dev = NULL;
	devinfo->ci = brcmf_chip_attach(devinfo, &brcmf_pcie_buscore_ops);
	if (IS_ERR(devinfo->ci)) {
		ret = PTR_ERR(devinfo->ci);
		devinfo->ci = NULL;
		goto fail;
	}

	pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL);
	if (pcie_bus_dev == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	devinfo->settings = brcmf_get_module_param(&devinfo->pdev->dev,
						   BRCMF_BUSTYPE_PCIE,
						   devinfo->ci->chip,
						   devinfo->ci->chiprev);
	if (!devinfo->settings) {
		ret = -ENOMEM;
		goto fail;
	}

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus) {
		ret = -ENOMEM;
		goto fail;
	}
	bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL);
	if (!bus->msgbuf) {
		ret = -ENOMEM;
		kfree(bus);
		goto fail;
	}

	/* hook it all together. */
	pcie_bus_dev->devinfo = devinfo;
	pcie_bus_dev->bus = bus;
	bus->dev = &pdev->dev;
	bus->bus_priv.pcie = pcie_bus_dev;
	bus->ops = &brcmf_pcie_bus_ops;
	bus->proto_type = BRCMF_PROTO_MSGBUF;
	bus->chip = devinfo->coreid;
	bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot);
	dev_set_drvdata(&pdev->dev, bus);

	ret = brcmf_alloc(&devinfo->pdev->dev, devinfo->settings);
	if (ret)
		goto fail_bus;

	fwreq = brcmf_pcie_prepare_fw_request(devinfo);
	if (!fwreq) {
		ret = -ENOMEM;
		goto fail_brcmf;
	}

	ret = brcmf_fw_get_firmwares(bus->dev, fwreq, brcmf_pcie_setup);
	if (ret < 0) {
		kfree(fwreq);
		goto fail_brcmf;
	}
	return 0;

fail_brcmf:
	brcmf_free(&devinfo->pdev->dev);
fail_bus:
	kfree(bus->msgbuf);
	kfree(bus);
fail:
	brcmf_err(NULL, "failed %x:%x\n", pdev->vendor, pdev->device);
	brcmf_pcie_release_resource(devinfo);
	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);
	if (devinfo->settings)
		brcmf_release_module_param(devinfo->settings);
	kfree(pcie_bus_dev);
	kfree(devinfo);
	return ret;
}


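/* PCI remove: take the link down, detach the common layers and release all
 * bus resources. Also used by brcmf_pcie_pm_leave_D3() when a warm resume is
 * not possible.
 */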
static void
brcmf_pcie_remove(struct pci_dev *pdev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter\n");

	bus = dev_get_drvdata(&pdev->dev);
	if (bus == NULL)
		return;

	devinfo = bus->bus_priv.pcie->devinfo;

	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
	if (devinfo->ci)
		brcmf_pcie_intr_disable(devinfo);

	brcmf_detach(&pdev->dev);
	brcmf_free(&pdev->dev);

	kfree(bus->bus_priv.pcie);
	kfree(bus->msgbuf->flowrings);
	kfree(bus->msgbuf);
	kfree(bus);

	brcmf_pcie_release_irq(devinfo);
	brcmf_pcie_release_scratchbuffers(devinfo);
	brcmf_pcie_release_ringbuffers(devinfo);
	brcmf_pcie_reset_device(devinfo);
	brcmf_pcie_release_resource(devinfo);

	if (devinfo->ci)
		brcmf_chip_detach(devinfo->ci);
	if (devinfo->settings)
		brcmf_release_module_param(devinfo->settings);

	kfree(devinfo);
	dev_set_drvdata(&pdev->dev, NULL);
}


#ifdef CONFIG_PM


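/* Suspend/freeze handler: bring the bus down, ask the firmware to enter D3
 * via the host-to-device mailbox and wait up to BRCMF_PCIE_MBDATA_TIMEOUT for
 * its acknowledgement; on timeout the bus is brought back up and -EIO is
 * returned.
 */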
static int brcmf_pcie_pm_enter_D3(struct device *dev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;

	brcmf_dbg(PCIE, "Enter\n");

	bus = dev_get_drvdata(dev);
	devinfo = bus->bus_priv.pcie->devinfo;

	brcmf_bus_change_state(bus, BRCMF_BUS_DOWN);

	devinfo->mbdata_completed = false;
	brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM);

	wait_event_timeout(devinfo->mbdata_resp_wait, devinfo->mbdata_completed,
			   BRCMF_PCIE_MBDATA_TIMEOUT);
	if (!devinfo->mbdata_completed) {
		brcmf_err(bus, "Timeout on response for entering D3 substate\n");
		brcmf_bus_change_state(bus, BRCMF_BUS_UP);
		return -EIO;
	}

	devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;

	return 0;
}


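/* Resume/restore handler: if the device is still up (interrupt mask retained
 * across suspend), send a D0 inform and re-enable it; otherwise detach the
 * chip and fall back to a full remove/probe cycle.
 */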
static int brcmf_pcie_pm_leave_D3(struct device *dev)
{
	struct brcmf_pciedev_info *devinfo;
	struct brcmf_bus *bus;
	struct pci_dev *pdev;
	int err;

	brcmf_dbg(PCIE, "Enter\n");

	bus = dev_get_drvdata(dev);
	devinfo = bus->bus_priv.pcie->devinfo;
	brcmf_dbg(PCIE, "Enter, dev=%p, bus=%p\n", dev, bus);

	/* Check if device is still up and running, if so we are ready */
	if (brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_INTMASK) != 0) {
		brcmf_dbg(PCIE, "Try to wakeup device....\n");
		if (brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D0_INFORM))
			goto cleanup;
		brcmf_dbg(PCIE, "Hot resume, continue....\n");
		devinfo->state = BRCMFMAC_PCIE_STATE_UP;
		brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
		brcmf_bus_change_state(bus, BRCMF_BUS_UP);
		brcmf_pcie_intr_enable(devinfo);
		brcmf_pcie_hostready(devinfo);
		return 0;
	}

cleanup:
	brcmf_chip_detach(devinfo->ci);
	devinfo->ci = NULL;
	pdev = devinfo->pdev;
	brcmf_pcie_remove(pdev);

	err = brcmf_pcie_probe(pdev, NULL);
	if (err)
		__brcmf_err(NULL, __func__, "probe after resume failed, err=%d\n", err);

	return err;
}


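/* System suspend/resume and hibernation freeze/restore share the same D3
 * enter/leave handlers.
 */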
static const struct dev_pm_ops brcmf_pciedrvr_pm = {
	.suspend = brcmf_pcie_pm_enter_D3,
	.resume = brcmf_pcie_pm_leave_D3,
	.freeze = brcmf_pcie_pm_enter_D3,
	.restore = brcmf_pcie_pm_leave_D3,
};


#endif /* CONFIG_PM */


#define BRCMF_PCIE_DEVICE(dev_id)	{ BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
	PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
#define BRCMF_PCIE_DEVICE_SUB(dev_id, subvend, subdev)	{ \
	BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
	subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }

static const struct pci_device_id brcmf_pcie_devid_table[] = {
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
	BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4359_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
	BRCMF_PCIE_DEVICE_SUB(0x4365, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4365),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
	BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID),
	{ /* end: all zeroes */ }
};


MODULE_DEVICE_TABLE(pci, brcmf_pcie_devid_table);


static struct pci_driver brcmf_pciedrvr = {
	.node = {},
	.name = KBUILD_MODNAME,
	.id_table = brcmf_pcie_devid_table,
	.probe = brcmf_pcie_probe,
	.remove = brcmf_pcie_remove,
#ifdef CONFIG_PM
	.driver.pm = &brcmf_pciedrvr_pm,
#endif
	.driver.coredump = brcmf_dev_coredump,
};


int brcmf_pcie_register(void)
{
	brcmf_dbg(PCIE, "Enter\n");
	return pci_register_driver(&brcmf_pciedrvr);
}


void brcmf_pcie_exit(void)
{
	brcmf_dbg(PCIE, "Enter\n");
	pci_unregister_driver(&brcmf_pciedrvr);
}