1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * PCIe host controller driver for Rockchip SoCs
4 *
5 * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
6 * http://www.rock-chips.com
7 *
8 * Author: Simon Xue <xxm@rock-chips.com>
9 */
10
11 #include <linux/clk.h>
12 #include <linux/delay.h>
13 #include <linux/fs.h>
14 #include <linux/gpio.h>
15 #include <linux/init.h>
16 #include <linux/interrupt.h>
17 #include <linux/iopoll.h>
18 #include <linux/irq.h>
19 #include <linux/irqchip/chained_irq.h>
20 #include <linux/irqdomain.h>
21 #include <linux/kernel.h>
22 #include <linux/kthread.h>
23 #include <linux/list.h>
24 #include <linux/mfd/syscon.h>
25 #include <linux/miscdevice.h>
26 #include <linux/module.h>
27 #include <linux/of_address.h>
28 #include <linux/of_device.h>
29 #include <linux/of_gpio.h>
30 #include <linux/of_pci.h>
31 #include <linux/pci.h>
32 #include <linux/phy/phy.h>
33 #include <linux/phy/pcie.h>
34 #include <linux/platform_device.h>
35 #include <linux/poll.h>
36 #include <linux/regmap.h>
37 #include <linux/reset.h>
38 #include <linux/resource.h>
39 #include <linux/rfkill-wlan.h>
40 #include <linux/signal.h>
41 #include <linux/types.h>
42 #include <linux/uaccess.h>
43 #include <linux/pci-epf.h>
44
45 #include "pcie-designware.h"
46 #include "../../pci.h"
47 #include "../rockchip-pcie-dma.h"
48 #include "pcie-dw-dmatest.h"
49
/* Controller operating mode, carried in the OF match data (rk_pcie_of_data). */
enum rk_pcie_device_mode {
	RK_PCIE_EP_TYPE,	/* endpoint */
	RK_PCIE_RC_TYPE,	/* root complex */
};
54
55 #define RK_PCIE_DBG 0
56
57 #define PCIE_DMA_OFFSET 0x380000
58
59 #define PCIE_DMA_CTRL_OFF 0x8
60 #define PCIE_DMA_WR_ENB 0xc
61 #define PCIE_DMA_WR_CTRL_LO 0x200
62 #define PCIE_DMA_WR_CTRL_HI 0x204
63 #define PCIE_DMA_WR_XFERSIZE 0x208
64 #define PCIE_DMA_WR_SAR_PTR_LO 0x20c
65 #define PCIE_DMA_WR_SAR_PTR_HI 0x210
66 #define PCIE_DMA_WR_DAR_PTR_LO 0x214
67 #define PCIE_DMA_WR_DAR_PTR_HI 0x218
68 #define PCIE_DMA_WR_WEILO 0x18
69 #define PCIE_DMA_WR_WEIHI 0x1c
70 #define PCIE_DMA_WR_DOORBELL 0x10
71 #define PCIE_DMA_WR_INT_STATUS 0x4c
72 #define PCIE_DMA_WR_INT_MASK 0x54
73 #define PCIE_DMA_WR_INT_CLEAR 0x58
74
75 #define PCIE_DMA_RD_ENB 0x2c
76 #define PCIE_DMA_RD_CTRL_LO 0x300
77 #define PCIE_DMA_RD_CTRL_HI 0x304
78 #define PCIE_DMA_RD_XFERSIZE 0x308
79 #define PCIE_DMA_RD_SAR_PTR_LO 0x30c
80 #define PCIE_DMA_RD_SAR_PTR_HI 0x310
81 #define PCIE_DMA_RD_DAR_PTR_LO 0x314
82 #define PCIE_DMA_RD_DAR_PTR_HI 0x318
83 #define PCIE_DMA_RD_WEILO 0x38
84 #define PCIE_DMA_RD_WEIHI 0x3c
85 #define PCIE_DMA_RD_DOORBELL 0x30
86 #define PCIE_DMA_RD_INT_STATUS 0xa0
87 #define PCIE_DMA_RD_INT_MASK 0xa8
88 #define PCIE_DMA_RD_INT_CLEAR 0xac
89
90 #define PCIE_DMA_CHANEL_MAX_NUM 2
91
92 /* Parameters for the waiting for iATU enabled routine */
93 #define LINK_WAIT_IATU_MIN 9000
94 #define LINK_WAIT_IATU_MAX 10000
95
96 #define PCIE_DIRECT_SPEED_CHANGE (0x1 << 17)
97
98 #define PCIE_TYPE0_STATUS_COMMAND_REG 0x4
99 #define PCIE_TYPE0_BAR0_REG 0x10
100
101 #define PCIE_CAP_LINK_CONTROL2_LINK_STATUS 0xa0
102
103 #define PCIE_CLIENT_INTR_STATUS_MSG_RX 0x04
104 #define PME_TO_ACK (BIT(9) | BIT(25))
105 #define PCIE_CLIENT_INTR_STATUS_LEGACY 0x08
106 #define PCIE_CLIENT_INTR_STATUS_MISC 0x10
107 #define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c
108 #define UNMASK_ALL_LEGACY_INT 0xffff0000
109 #define MASK_LEGACY_INT(x) (0x00110011 << x)
110 #define UNMASK_LEGACY_INT(x) (0x00110000 << x)
111 #define PCIE_CLIENT_INTR_MASK 0x24
112 #define PCIE_CLIENT_POWER 0x2c
113 #define READY_ENTER_L23 BIT(3)
114 #define PCIE_CLIENT_MSG_GEN 0x34
115 #define PME_TURN_OFF (BIT(4) | BIT(20))
116 #define PCIE_CLIENT_GENERAL_DEBUG 0x104
117 #define PCIE_CLIENT_HOT_RESET_CTRL 0x180
118 #define PCIE_LTSSM_APP_DLY1_EN BIT(0)
119 #define PCIE_LTSSM_APP_DLY2_EN BIT(1)
120 #define PCIE_LTSSM_APP_DLY1_DONE BIT(2)
121 #define PCIE_LTSSM_APP_DLY2_DONE BIT(3)
122 #define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
123 #define PCIE_CLIENT_LTSSM_STATUS 0x300
124 #define SMLH_LINKUP BIT(16)
125 #define RDLH_LINKUP BIT(17)
126 #define PCIE_CLIENT_CDM_RASDES_TBA_INFO_CMN 0x154
127 #define PCIE_CLIENT_DBG_FIFO_MODE_CON 0x310
128 #define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D0 0x320
129 #define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D1 0x324
130 #define PCIE_CLIENT_DBG_FIFO_TRN_HIT_D0 0x328
131 #define PCIE_CLIENT_DBG_FIFO_TRN_HIT_D1 0x32c
132 #define PCIE_CLIENT_DBG_FIFO_STATUS 0x350
133 #define PCIE_CLIENT_DBG_TRANSITION_DATA 0xffff0000
134 #define PCIE_CLIENT_DBF_EN 0xffff0007
135
136 #define PCIE_PHY_LINKUP BIT(0)
137 #define PCIE_DATA_LINKUP BIT(1)
138
139 #define PCIE_TYPE0_HDR_DBI2_OFFSET 0x100000
140 #define PCIE_SB_BAR0_MASK_REG 0x100010
141
142 #define PCIE_PL_ORDER_RULE_CTRL_OFF 0x8B4
143 #define RK_PCIE_L2_TMOUT_US 5000
144 #define RK_PCIE_HOTRESET_TMOUT_US 10000
145
/*
 * LTSSM state codes, presumably as reported in the low bits of
 * PCIE_CLIENT_LTSSM_STATUS — TODO confirm against the SoC TRM.
 */
enum rk_pcie_ltssm_code {
	S_L0 = 0x11,
	S_L0S = 0x12,
	S_L1_IDLE = 0x14,
	S_L2_IDLE = 0x15,
	S_MAX = 0x1f,
};
153
/* Per-controller driver state. */
struct rk_pcie {
	struct dw_pcie *pci;
	enum rk_pcie_device_mode mode;		/* RC or EP */
	enum phy_mode phy_mode;
	int phy_sub_mode;
	unsigned char bar_to_atu[6];		/* BAR -> inbound iATU window index */
	phys_addr_t *outbound_addr;		/* CPU addr programmed per outbound window */
	unsigned long *ib_window_map;		/* bitmap of in-use inbound windows */
	unsigned long *ob_window_map;		/* bitmap of in-use outbound windows */
	unsigned int num_ib_windows;		/* from DT "num-ib-windows" */
	unsigned int num_ob_windows;
	void __iomem *dbi_base;
	void __iomem *apb_base;			/* PCIE_CLIENT APB register block */
	struct phy *phy;
	struct clk_bulk_data *clks;
	struct reset_control *rsts;
	unsigned int clk_cnt;
	struct gpio_desc *rst_gpio;		/* PERST# control */
	u32 perst_inactive_ms;			/* delay before deasserting PERST# */
	struct gpio_desc *prsnt_gpio;
	phys_addr_t mem_start;			/* EP: CPU target of BAR0 inbound map */
	size_t mem_size;
	struct pcie_port pp;
	struct regmap *usb_pcie_grf;
	struct regmap *pmu_grf;
	struct dma_trx_obj *dma_obj;		/* set when the uDMA engine is used */
	bool in_suspend;			/* restoring iATU state after suspend */
	bool skip_scan_in_resume;		/* don't wait for link in resume path */
	bool is_rk1808;				/* RK1808-specific register quirks */
	bool is_signal_test;			/* tolerate link-training failure */
	bool bifurcation;
	bool supports_clkreq;			/* CLKREQ# usable; affects L1ss setup */
	struct regulator *vpcie3v3;
	struct irq_domain *irq_domain;
	raw_spinlock_t intx_lock;
	u16 aspm;
	u32 l1ss_ctl1;
	struct dentry *debugfs;
	u32 msi_vector_num;
	struct workqueue_struct *hot_rst_wq;
	struct work_struct hot_rst_work;
};
196
/* Per-compatible match data: controller mode and MSI vector count. */
struct rk_pcie_of_data {
	enum rk_pcie_device_mode mode;
	u32 msi_vector_num;
};
201
202 #define to_rk_pcie(x) dev_get_drvdata((x)->dev)
203
/*
 * Read a 1/2/4-byte value from @addr into @val.
 * Returns PCIBIOS_BAD_REGISTER_NUMBER (with *val zeroed) for a
 * misaligned address or unsupported size, PCIBIOS_SUCCESSFUL otherwise.
 */
static int rk_pcie_read(void __iomem *addr, int size, u32 *val)
{
	/* Reject accesses that are not naturally aligned for @size. */
	if ((uintptr_t)addr & (size - 1)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	switch (size) {
	case 4:
		*val = readl(addr);
		break;
	case 2:
		*val = readw(addr);
		break;
	case 1:
		*val = readb(addr);
		break;
	default:
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
224
/*
 * Write a 1/2/4-byte @val to @addr.
 * Returns PCIBIOS_BAD_REGISTER_NUMBER for a misaligned address or
 * unsupported size, PCIBIOS_SUCCESSFUL otherwise.
 */
static int rk_pcie_write(void __iomem *addr, int size, u32 val)
{
	/* Reject accesses that are not naturally aligned for @size. */
	if ((uintptr_t)addr & (size - 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	switch (size) {
	case 4:
		writel(val, addr);
		break;
	case 2:
		writew(val, addr);
		break;
	case 1:
		writeb(val, addr);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
241
__rk_pcie_read_apb(struct rk_pcie * rk_pcie,void __iomem * base,u32 reg,size_t size)242 static u32 __rk_pcie_read_apb(struct rk_pcie *rk_pcie, void __iomem *base,
243 u32 reg, size_t size)
244 {
245 int ret;
246 u32 val;
247
248 ret = rk_pcie_read(base + reg, size, &val);
249 if (ret)
250 dev_err(rk_pcie->pci->dev, "Read APB address failed\n");
251
252 return val;
253 }
254
/* Write @size bytes of @val at @base + @reg, logging (but not propagating) errors. */
static void __rk_pcie_write_apb(struct rk_pcie *rk_pcie, void __iomem *base,
				u32 reg, size_t size, u32 val)
{
	if (rk_pcie_write(base + reg, size, val))
		dev_err(rk_pcie->pci->dev, "Write APB address failed\n");
}
264
/* 32-bit read from the PCIE_CLIENT APB register block. */
static inline u32 rk_pcie_readl_apb(struct rk_pcie *rk_pcie, u32 reg)
{
	return __rk_pcie_read_apb(rk_pcie, rk_pcie->apb_base, reg, sizeof(u32));
}
269
/* 32-bit write to the PCIE_CLIENT APB register block. */
static inline void rk_pcie_writel_apb(struct rk_pcie *rk_pcie, u32 reg,
				      u32 val)
{
	__rk_pcie_write_apb(rk_pcie, rk_pcie->apb_base, reg, sizeof(u32), val);
}
275
rk_pcie_iatu_unroll_enabled(struct dw_pcie * pci)276 static u8 rk_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
277 {
278 u32 val;
279
280 val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
281 if (val == 0xffffffff)
282 return 1;
283
284 return 0;
285 }
286
/* 32-bit write into the iATU register space, preferring the ops hook. */
static void rk_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
{
	if (pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);
		return;
	}

	if (dw_pcie_write(pci->atu_base + reg, 4, val))
		dev_err(pci->dev, "Write ATU address failed\n");
}
300
/* Write @val to inbound-unroll register @reg of window @index. */
static void rk_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	rk_pcie_writel_atu(pci, PCIE_GET_ATU_INB_UNR_REG_OFFSET(index) + reg,
			   val);
}
308
/* 32-bit read from the iATU register space, preferring the ops hook. */
static u32 rk_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
{
	u32 val;

	if (pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, pci->atu_base, reg, 4);

	if (dw_pcie_read(pci->atu_base + reg, 4, &val))
		dev_err(pci->dev, "Read ATU address failed\n");

	return val;
}
323
/* Read inbound-unroll register @reg of window @index. */
static u32 rk_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	return rk_pcie_readl_atu(pci,
				 PCIE_GET_ATU_INB_UNR_REG_OFFSET(index) + reg);
}
330
/*
 * Program inbound iATU window @index (unroll register layout) so that
 * accesses hitting @bar of function @func_no are translated to @cpu_addr.
 *
 * Returns 0 on success, -EINVAL for an unknown @as_type, -EBUSY if the
 * window never reports itself enabled.
 */
static int rk_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
					   int index, int bar, u64 cpu_addr,
					   enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	/* Local (CPU-side) target address for the translation. */
	rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(cpu_addr));
	rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	/* BAR-match mode: bits [10:8] of CTRL2 select the matched BAR. */
	rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type |
				 PCIE_ATU_FUNC_NUM(func_no));
	rk_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_FUNC_NUM_MATCH_EN |
				 PCIE_ATU_ENABLE |
				 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = rk_pcie_readl_ib_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -EBUSY;
}
377
378
/*
 * Program inbound iATU window @index so that accesses hitting @bar of
 * function @func_no are translated to @cpu_addr.  Dispatches to the
 * unroll variant when the controller uses the unrolled register layout;
 * otherwise uses the legacy viewport-indexed registers.
 *
 * Returns 0 on success, -EINVAL for an unknown @as_type, -EBUSY if the
 * window never reports itself enabled.
 */
static int rk_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
				    int bar, u64 cpu_addr,
				    enum dw_pcie_as_type as_type)
{
	int type;
	u32 retries, val;

	if (pci->iatu_unroll_enabled)
		return rk_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar,
						       cpu_addr, as_type);

	/* Select the inbound window; all following writes go through it. */
	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
			   index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));

	switch (as_type) {
	case DW_PCIE_AS_MEM:
		type = PCIE_ATU_TYPE_MEM;
		break;
	case DW_PCIE_AS_IO:
		type = PCIE_ATU_TYPE_IO;
		break;
	default:
		return -EINVAL;
	}

	/* BAR-match mode: bits [10:8] of CR2 select the matched BAR. */
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
			   PCIE_ATU_FUNC_NUM(func_no));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE |
			   PCIE_ATU_FUNC_NUM_MATCH_EN |
			   PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -EBUSY;
}
427
/*
 * Map @bar inbound to @cpu_addr for function 0.
 *
 * On first use a free inbound window is allocated and recorded in
 * bar_to_atu[] / ib_window_map.  When restoring state after suspend
 * (in_suspend set), the window previously recorded for @bar is
 * reprogrammed without touching the allocation bitmap.
 */
static int rk_pcie_ep_inbound_atu(struct rk_pcie *rk_pcie,
				  enum pci_barno bar, dma_addr_t cpu_addr,
				  enum dw_pcie_as_type as_type)
{
	int ret;
	u32 free_win;
	u8 func_no = 0x0;

	if (rk_pcie->in_suspend) {
		free_win = rk_pcie->bar_to_atu[bar];
	} else {
		free_win = find_first_zero_bit(rk_pcie->ib_window_map,
					       rk_pcie->num_ib_windows);
		if (free_win >= rk_pcie->num_ib_windows) {
			dev_err(rk_pcie->pci->dev, "No free inbound window\n");
			return -EINVAL;
		}
	}

	ret = rk_pcie_prog_inbound_atu(rk_pcie->pci, func_no, free_win, bar,
				       cpu_addr, as_type);
	if (ret < 0) {
		dev_err(rk_pcie->pci->dev, "Failed to program IB window\n");
		return ret;
	}

	/* Resume path reuses an already-recorded window; nothing to book-keep. */
	if (rk_pcie->in_suspend)
		return 0;

	rk_pcie->bar_to_atu[bar] = free_win;
	set_bit(free_win, rk_pcie->ib_window_map);

	return 0;
}
462
/* Write @val to outbound-unroll register @reg of window @index. */
static void rk_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
				     u32 val)
{
	rk_pcie_writel_atu(pci, PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index) + reg,
			   val);
}
470
/* Read outbound-unroll register @reg of window @index. */
static u32 rk_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
{
	return rk_pcie_readl_atu(pci,
				 PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index) + reg);
}
477
/*
 * Program outbound iATU window @index (unroll register layout) to
 * translate CPU accesses in [@cpu_addr, @cpu_addr + @size - 1] to PCI
 * address @pci_addr with TLP type @type, for function @func_no.
 * Failure to enable is logged but not propagated.
 */
static void rk_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no,
					     int index, int type,
					     u64 cpu_addr, u64 pci_addr,
					     u32 size)
{
	u32 retries, val;
	u64 limit_addr = cpu_addr + size - 1;

	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
				 lower_32_bits(cpu_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
				 upper_32_bits(cpu_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_LIMIT,
				 lower_32_bits(limit_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_LIMIT,
				 upper_32_bits(limit_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(pci_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(pci_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
				 type | PCIE_ATU_FUNC_NUM(func_no));
	/* Enable last, after the whole window is described. */
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = rk_pcie_readl_ob_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
517
/*
 * Program outbound iATU window @index to translate CPU accesses in
 * [@cpu_addr, @cpu_addr + @size - 1] to PCI address @pci_addr with TLP
 * type @type.  Applies the platform cpu_addr_fixup hook first, then
 * dispatches to the unroll variant or the legacy viewport registers.
 * Failure to enable is logged but not propagated.
 */
static void rk_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
				      int type, u64 cpu_addr, u64 pci_addr, u32 size)
{
	u32 retries, val;

	if (pci->ops->cpu_addr_fixup)
		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);

	if (pci->iatu_unroll_enabled) {
		rk_pcie_prog_outbound_atu_unroll(pci, 0x0, index, type,
						 cpu_addr, pci_addr, size);
		return;
	}

	/* Select the outbound window; all following writes go through it. */
	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
			   PCIE_ATU_REGION_OUTBOUND | index);
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
			   lower_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
			   upper_32_bits(cpu_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
			   lower_32_bits(cpu_addr + size - 1));
	dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
			   lower_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
			   upper_32_bits(pci_addr));
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type |
			   PCIE_ATU_FUNC_NUM(0x0));
	/* Enable last, after the whole window is described. */
	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
561
/*
 * Map CPU range [@phys_addr, @phys_addr + @size) outbound to @pci_addr
 * as a MEM window.
 *
 * On first use a free outbound window is allocated and recorded in
 * ob_window_map / outbound_addr[].  When restoring after suspend
 * (in_suspend set), the first window marked in-use is reprogrammed
 * without touching the bitmap.
 */
static int rk_pcie_ep_outbound_atu(struct rk_pcie *rk_pcie,
				   phys_addr_t phys_addr, u64 pci_addr,
				   size_t size)
{
	u32 free_win;

	if (rk_pcie->in_suspend) {
		free_win = find_first_bit(rk_pcie->ob_window_map,
					  rk_pcie->num_ob_windows);
	} else {
		free_win = find_first_zero_bit(rk_pcie->ob_window_map,
					       rk_pcie->num_ob_windows);
		if (free_win >= rk_pcie->num_ob_windows) {
			dev_err(rk_pcie->pci->dev, "No free outbound window\n");
			return -EINVAL;
		}
	}

	rk_pcie_prog_outbound_atu(rk_pcie->pci, free_win, PCIE_ATU_TYPE_MEM,
				  phys_addr, pci_addr, size);

	/* Resume path reuses an already-recorded window; nothing to book-keep. */
	if (rk_pcie->in_suspend)
		return 0;

	set_bit(free_win, rk_pcie->ob_window_map);
	rk_pcie->outbound_addr[free_win] = phys_addr;

	return 0;
}
591
__rk_pcie_ep_reset_bar(struct rk_pcie * rk_pcie,enum pci_barno bar,int flags)592 static void __rk_pcie_ep_reset_bar(struct rk_pcie *rk_pcie,
593 enum pci_barno bar, int flags)
594 {
595 u32 reg;
596
597 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
598 dw_pcie_writel_dbi(rk_pcie->pci, reg, 0x0);
599 if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
600 dw_pcie_writel_dbi(rk_pcie->pci, reg + 4, 0x0);
601 }
602
/* Clear @bar treating it as a 32-bit BAR (no flags). */
static void rk_pcie_ep_reset_bar(struct rk_pcie *rk_pcie, enum pci_barno bar)
{
	__rk_pcie_ep_reset_bar(rk_pcie, bar, 0);
}
607
rk_pcie_ep_atu_init(struct rk_pcie * rk_pcie)608 static int rk_pcie_ep_atu_init(struct rk_pcie *rk_pcie)
609 {
610 int ret;
611 enum pci_barno bar;
612 enum dw_pcie_as_type as_type;
613 dma_addr_t cpu_addr;
614 phys_addr_t phys_addr;
615 u64 pci_addr;
616 size_t size;
617
618 for (bar = BAR_0; bar <= BAR_5; bar++)
619 rk_pcie_ep_reset_bar(rk_pcie, bar);
620
621 cpu_addr = rk_pcie->mem_start;
622 as_type = DW_PCIE_AS_MEM;
623 ret = rk_pcie_ep_inbound_atu(rk_pcie, BAR_0, cpu_addr, as_type);
624 if (ret)
625 return ret;
626
627 phys_addr = 0x0;
628 pci_addr = 0x0;
629 size = SZ_2G;
630 ret = rk_pcie_ep_outbound_atu(rk_pcie, phys_addr, pci_addr, size);
631 if (ret)
632 return ret;
633
634 return 0;
635 }
636
#if defined(CONFIG_PCIEASPM)
/*
 * Hide the ASPM L1.1/L1.2 and L1 PM substates capability bits in the
 * L1SS extended capability so the ASPM core will not enable L1
 * substates.  Called from rk_pcie_set_mode() when CLKREQ# is not usable.
 */
static void disable_aspm_l1ss(struct rk_pcie *rk_pcie)
{
	u32 val, cfg_link_cap_l1sub;

	val = dw_pcie_find_ext_capability(rk_pcie->pci, PCI_EXT_CAP_ID_L1SS);
	if (!val) {
		dev_err(rk_pcie->pci->dev, "can't find l1ss cap\n");

		return;
	}

	cfg_link_cap_l1sub = val + PCI_L1SS_CAP;

	val = dw_pcie_readl_dbi(rk_pcie->pci, cfg_link_cap_l1sub);
	val &= ~(PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2 | PCI_L1SS_CAP_L1_PM_SS);
	dw_pcie_writel_dbi(rk_pcie->pci, cfg_link_cap_l1sub, val);
}
#else
/* No-op when ASPM support is not built in. */
static inline void disable_aspm_l1ss(struct rk_pcie *rk_pcie) { return; }
#endif
658
/*
 * Select EP or RC operation via the PCIE_CLIENT control register at
 * offset 0x0.  NOTE(review): the 0xXXXX0000 halves of the written
 * values look like hi-word write-enable masks — confirm against the TRM.
 */
static inline void rk_pcie_set_mode(struct rk_pcie *rk_pcie)
{
	switch (rk_pcie->mode) {
	case RK_PCIE_EP_TYPE:
		rk_pcie_writel_apb(rk_pcie, 0x0, 0xf00000);
		break;
	case RK_PCIE_RC_TYPE:
		if (rk_pcie->supports_clkreq) {
			/* Application is ready to have reference clock removed */
			rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_POWER, 0x00010001);
		} else {
			/* Pull down CLKREQ# to assert the connecting CLOCK_GEN OE */
			rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_POWER, 0x30011000);
			disable_aspm_l1ss(rk_pcie);
		}
		rk_pcie_writel_apb(rk_pcie, 0x0, 0xf00040);
		/*
		 * Disable order rule for CPL can't pass halted P queue.
		 * Need to check producer-consumer model.
		 * Just for RK1808 platform.
		 */
		if (rk_pcie->is_rk1808)
			dw_pcie_writel_dbi(rk_pcie->pci,
					   PCIE_PL_ORDER_RULE_CTRL_OFF,
					   0xff00);
		break;
	}
}
687
/* Clear the PCIE_CLIENT_GENERAL_DEBUG latch used by rk_pcie_link_up() on RK1808. */
static inline void rk_pcie_link_status_clear(struct rk_pcie *rk_pcie)
{
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_GENERAL_DEBUG, 0x0);
}
692
/*
 * Halt link training.  NOTE(review): 0xc0008 presumably clears the
 * app_ltssm_enable bits via hi-word write-mask — confirm against the TRM.
 */
static inline void rk_pcie_disable_ltssm(struct rk_pcie *rk_pcie)
{
	rk_pcie_writel_apb(rk_pcie, 0x0, 0xc0008);
}
697
/*
 * Start link training.  NOTE(review): 0xc000c presumably sets the
 * app_ltssm_enable bits via hi-word write-mask (mirror image of
 * rk_pcie_disable_ltssm()'s 0xc0008) — confirm against the TRM.
 *
 * Hex literal lower-cased for consistency with the rest of the file.
 */
static inline void rk_pcie_enable_ltssm(struct rk_pcie *rk_pcie)
{
	rk_pcie_writel_apb(rk_pcie, 0x0, 0xc000c);
}
702
rk_pcie_link_up(struct dw_pcie * pci)703 static int rk_pcie_link_up(struct dw_pcie *pci)
704 {
705 struct rk_pcie *rk_pcie = to_rk_pcie(pci);
706 u32 val;
707
708 if (rk_pcie->is_rk1808) {
709 val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_GENERAL_DEBUG);
710 if ((val & (PCIE_PHY_LINKUP | PCIE_DATA_LINKUP)) == 0x3)
711 return 1;
712 } else {
713 val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS);
714 if ((val & (RDLH_LINKUP | SMLH_LINKUP)) == 0x30000)
715 return 1;
716 }
717
718 return 0;
719 }
720
rk_pcie_enable_debug(struct rk_pcie * rk_pcie)721 static void rk_pcie_enable_debug(struct rk_pcie *rk_pcie)
722 {
723 if (!IS_ENABLED(CONFIG_DEBUG_FS))
724 return;
725 if (rk_pcie->is_rk1808 == true)
726 return;
727
728 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_PTN_HIT_D0,
729 PCIE_CLIENT_DBG_TRANSITION_DATA);
730 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_PTN_HIT_D1,
731 PCIE_CLIENT_DBG_TRANSITION_DATA);
732 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_TRN_HIT_D0,
733 PCIE_CLIENT_DBG_TRANSITION_DATA);
734 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_TRN_HIT_D1,
735 PCIE_CLIENT_DBG_TRANSITION_DATA);
736 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_MODE_CON,
737 PCIE_CLIENT_DBF_EN);
738 }
739
/* Dump LTSSM state and drain the debug FIFO; compiled out unless RK_PCIE_DBG. */
static void rk_pcie_debug_dump(struct rk_pcie *rk_pcie)
{
#if RK_PCIE_DBG
	u32 loop;
	struct dw_pcie *pci = rk_pcie->pci;

	dev_info(pci->dev, "ltssm = 0x%x\n",
		 rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
	/* 64 reads drain the FIFO one entry at a time. */
	for (loop = 0; loop < 64; loop++)
		dev_info(pci->dev, "fifo_status = 0x%x\n",
			 rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_STATUS));
#endif
}
753
/*
 * Bring up the PCIe link: assert PERST#, restart the LTSSM, honour the
 * PERST# timing from the CEM spec, then poll for link-up with a double
 * check against transient link drops.  Returns 0 on link-up (or always
 * in signal-test mode), -EINVAL on training failure.
 */
static int rk_pcie_establish_link(struct dw_pcie *pci)
{
	int retries, power;
	struct rk_pcie *rk_pcie = to_rk_pcie(pci);
	bool std_rc = rk_pcie->mode == RK_PCIE_RC_TYPE && !rk_pcie->dma_obj;

	/*
	 * For standard RC, even if the link has been setup by firmware,
	 * we still need to reset link as we need to remove all resource info
	 * from devices, for instance BAR, as it wasn't assigned by kernel.
	 */
	if (dw_pcie_link_up(pci) && !std_rc) {
		dev_err(pci->dev, "link is already up\n");
		return 0;
	}

	/* Reset the device by asserting PERST# */
	gpiod_set_value_cansleep(rk_pcie->rst_gpio, 0);

	rk_pcie_disable_ltssm(rk_pcie);
	rk_pcie_link_status_clear(rk_pcie);
	rk_pcie_enable_debug(rk_pcie);

	/* Enable client reset or link down interrupt */
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0x40000);

	/* Enable LTSSM */
	rk_pcie_enable_ltssm(rk_pcie);

	/*
	 * In resume routine, function devices' resume function must be late after
	 * controllers'. Some devices, such as Wi-Fi, need special IO setting before
	 * finishing training. So there must be timeout here. These kinds of devices
	 * need rescan devices by its driver when used. So no need to waste time waiting
	 * for training pass.
	 */
	if (rk_pcie->in_suspend && rk_pcie->skip_scan_in_resume) {
		rfkill_get_wifi_power_state(&power);
		if (!power) {
			gpiod_set_value_cansleep(rk_pcie->rst_gpio, 1);
			return 0;
		}
	}

	/*
	 * PCIe requires the refclk to be stable for 100us prior to releasing
	 * PERST and T_PVPERL (Power stable to PERST# inactive) should be a
	 * minimum of 100ms. See table 2-4 in section 2.6.2 AC, the PCI Express
	 * Card Electromechanical Specification 3.0. So 100ms in total is the min
	 * requirement here. We add a 200ms by default for the sake of hoping
	 * everything works fine. If it doesn't, please extend it in the DT node
	 * via rockchip,perst-inactive-ms.
	 */
	msleep(rk_pcie->perst_inactive_ms);
	gpiod_set_value_cansleep(rk_pcie->rst_gpio, 1);

	/*
	 * Add this 1ms delay because we observe link is always up stably after it and
	 * could help us save 20ms for scanning devices.
	 */
	usleep_range(1000, 1100);

	for (retries = 0; retries < 100; retries++) {
		if (dw_pcie_link_up(pci)) {
			/*
			 * We may be here in case of L0 in Gen1. But if EP is capable
			 * of Gen2 or Gen3, Gen switch may happen just in this time, but
			 * we keep on accessing devices in unstable link status. Given
			 * that LTSSM max timeout is 24ms per period, we can wait a bit
			 * more for Gen switch.
			 */
			msleep(50);
			/* In case link drop after linkup, double check it */
			if (dw_pcie_link_up(pci)) {
				dev_info(pci->dev, "PCIe Link up, LTSSM is 0x%x\n",
					 rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
				rk_pcie_debug_dump(rk_pcie);
				return 0;
			}
		}

		dev_info_ratelimited(pci->dev, "PCIe Linking... LTSSM is 0x%x\n",
				     rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
		rk_pcie_debug_dump(rk_pcie);
		msleep(20);
	}

	dev_err(pci->dev, "PCIe Link Fail\n");

	/* Signal-test mode tolerates a failed link on purpose. */
	return rk_pcie->is_signal_test == true ? 0 : -EINVAL;
}
844
rk_pcie_udma_enabled(struct rk_pcie * rk_pcie)845 static bool rk_pcie_udma_enabled(struct rk_pcie *rk_pcie)
846 {
847 return dw_pcie_readl_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
848 PCIE_DMA_CTRL_OFF);
849 }
850
rk_pcie_init_dma_trx(struct rk_pcie * rk_pcie)851 static int rk_pcie_init_dma_trx(struct rk_pcie *rk_pcie)
852 {
853 if (!rk_pcie_udma_enabled(rk_pcie))
854 return 0;
855
856 rk_pcie->dma_obj = rk_pcie_dma_obj_probe(rk_pcie->pci->dev);
857 if (IS_ERR(rk_pcie->dma_obj)) {
858 dev_err(rk_pcie->pci->dev, "failed to prepare dma object\n");
859 return -EINVAL;
860 } else if (rk_pcie->dma_obj) {
861 goto out;
862 }
863
864 rk_pcie->dma_obj = pcie_dw_dmatest_register(rk_pcie->pci->dev, true);
865 if (IS_ERR(rk_pcie->dma_obj)) {
866 dev_err(rk_pcie->pci->dev, "failed to prepare dmatest\n");
867 return -EINVAL;
868 }
869 out:
870 /* Enable client write and read interrupt */
871 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0xc000000);
872
873 /* Enable core write interrupt */
874 dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_MASK,
875 0x0);
876 /* Enable core read interrupt */
877 dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_MASK,
878 0x0);
879 return 0;
880 }
881
rk_pci_find_resbar_capability(struct rk_pcie * rk_pcie)882 static int rk_pci_find_resbar_capability(struct rk_pcie *rk_pcie)
883 {
884 u32 header;
885 int ttl;
886 int start = 0;
887 int pos = PCI_CFG_SPACE_SIZE;
888 int cap = PCI_EXT_CAP_ID_REBAR;
889
890 /* minimum 8 bytes per capability */
891 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
892
893 header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
894
895 /*
896 * If we have no capabilities, this is indicated by cap ID,
897 * cap version and next pointer all being 0.
898 */
899 if (header == 0)
900 return 0;
901
902 while (ttl-- > 0) {
903 if (PCI_EXT_CAP_ID(header) == cap && pos != start)
904 return pos;
905
906 pos = PCI_EXT_CAP_NEXT(header);
907 if (pos < PCI_CFG_SPACE_SIZE)
908 break;
909
910 header = dw_pcie_readl_dbi(rk_pcie->pci, pos);
911 if (!header)
912 break;
913 }
914
915 return 0;
916 }
917
#ifdef MODULE
/*
 * Local definition of dw_pcie_write_dbi2() for module builds —
 * presumably because the core helper is not exported to modules in
 * this tree (TODO confirm).  Writes @val into the shadow (DBI2)
 * register space, preferring the platform write_dbi2 hook.
 */
void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
	int ret;

	if (pci->ops && pci->ops->write_dbi2) {
		pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
		return;
	}

	ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
	if (ret)
		dev_err(pci->dev, "write DBI address failed\n");
}
#endif
933
rk_pcie_ep_set_bar_flag(struct rk_pcie * rk_pcie,enum pci_barno barno,int flags)934 static int rk_pcie_ep_set_bar_flag(struct rk_pcie *rk_pcie, enum pci_barno barno, int flags)
935 {
936 enum pci_barno bar = barno;
937 u32 reg;
938
939 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
940
941 /* Disabled the upper 32bits BAR to make a 64bits bar pair */
942 if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
943 dw_pcie_writel_dbi2(rk_pcie->pci, reg + 4, 0);
944
945 dw_pcie_writel_dbi(rk_pcie->pci, reg, flags);
946 if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
947 dw_pcie_writel_dbi(rk_pcie->pci, reg + 4, 0);
948
949 return 0;
950 }
951
/*
 * One-time endpoint-mode configuration: unmask DMA interrupts, program
 * link width/speed from the DT "num-lanes" property, enable bus-master
 * and memory space, resize the BARs via the Resizable BAR capability,
 * and program the device/class IDs the remote host will see.
 */
static void rk_pcie_ep_setup(struct rk_pcie *rk_pcie)
{
	int ret;
	u32 val;
	u32 lanes;
	struct device *dev = rk_pcie->pci->dev;
	struct device_node *np = dev->of_node;
	int resbar_base;
	int bar;

	/* Enable client write and read interrupt */
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK, 0xc000000);

	/* Enable core write interrupt */
	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_MASK,
			   0x0);
	/* Enable core read interrupt */
	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_MASK,
			   0x0);

	/* Missing "num-lanes" falls through to the invalid-value error below. */
	ret = of_property_read_u32(np, "num-lanes", &lanes);
	if (ret)
		lanes = 0;

	/* Set the number of lanes */
	val = dw_pcie_readl_dbi(rk_pcie->pci, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_MODE_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(dev, "num-lanes %u: invalid value\n", lanes);
		return;
	}

	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_PORT_LINK_CONTROL, val);

	/* Set link width speed control register */
	val = dw_pcie_readl_dbi(rk_pcie->pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}

	/* Request a direct speed change once the link is up. */
	val |= PCIE_DIRECT_SPEED_CHANGE;

	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	/* Enable bus master and memory space */
	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_TYPE0_STATUS_COMMAND_REG, 0x6);

	resbar_base = rk_pci_find_resbar_capability(rk_pcie);
	if (!resbar_base) {
		dev_warn(dev, "failed to find resbar_base\n");
	} else {
		/* Resize BAR0 to support 512GB, BAR1 to support 8M, BAR2~5 to support 64M */
		dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x4, 0xfffff0);
		dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x8, 0x13c0);
		dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0xc, 0xfffff0);
		dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x10, 0x3c0);
		for (bar = 2; bar < 6; bar++) {
			dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x4 + bar * 0x8, 0xfffff0);
			dw_pcie_writel_dbi(rk_pcie->pci, resbar_base + 0x8 + bar * 0x8, 0x6c0);
		}

		/* Set flags */
		rk_pcie_ep_set_bar_flag(rk_pcie, BAR_0, PCI_BASE_ADDRESS_MEM_TYPE_32);
		rk_pcie_ep_set_bar_flag(rk_pcie, BAR_1, PCI_BASE_ADDRESS_MEM_TYPE_32);
		rk_pcie_ep_set_bar_flag(rk_pcie, BAR_2, PCI_BASE_ADDRESS_MEM_PREFETCH | PCI_BASE_ADDRESS_MEM_TYPE_64);
		rk_pcie_ep_set_bar_flag(rk_pcie, BAR_4, PCI_BASE_ADDRESS_MEM_PREFETCH | PCI_BASE_ADDRESS_MEM_TYPE_64);
	}

	/* Device id and class id needed for request bar address */
	dw_pcie_writew_dbi(rk_pcie->pci, PCI_DEVICE_ID, 0x356a);
	dw_pcie_writew_dbi(rk_pcie->pci, PCI_CLASS_DEVICE, 0x0580);

	/* Set shadow BAR0 so it spans the reserved memory region (RK1808 only) */
	if (rk_pcie->is_rk1808) {
		val = rk_pcie->mem_size - 1;
		dw_pcie_writel_dbi(rk_pcie->pci, PCIE_SB_BAR0_MASK_REG, val);
	}
}
1055
rk_pcie_ep_win_parse(struct rk_pcie * rk_pcie)1056 static int rk_pcie_ep_win_parse(struct rk_pcie *rk_pcie)
1057 {
1058 int ret;
1059 void *addr;
1060 struct device *dev = rk_pcie->pci->dev;
1061 struct device_node *np = dev->of_node;
1062
1063 ret = of_property_read_u32(np, "num-ib-windows",
1064 &rk_pcie->num_ib_windows);
1065 if (ret < 0) {
1066 dev_err(dev, "unable to read *num-ib-windows* property\n");
1067 return ret;
1068 }
1069
1070 if (rk_pcie->num_ib_windows > MAX_IATU_IN) {
1071 dev_err(dev, "Invalid *num-ib-windows*\n");
1072 return -EINVAL;
1073 }
1074
1075 ret = of_property_read_u32(np, "num-ob-windows",
1076 &rk_pcie->num_ob_windows);
1077 if (ret < 0) {
1078 dev_err(dev, "Unable to read *num-ob-windows* property\n");
1079 return ret;
1080 }
1081
1082 if (rk_pcie->num_ob_windows > MAX_IATU_OUT) {
1083 dev_err(dev, "Invalid *num-ob-windows*\n");
1084 return -EINVAL;
1085 }
1086
1087 rk_pcie->ib_window_map = devm_kcalloc(dev,
1088 BITS_TO_LONGS(rk_pcie->num_ib_windows),
1089 sizeof(long), GFP_KERNEL);
1090 if (!rk_pcie->ib_window_map)
1091 return -ENOMEM;
1092
1093 rk_pcie->ob_window_map = devm_kcalloc(dev,
1094 BITS_TO_LONGS(rk_pcie->num_ob_windows),
1095 sizeof(long), GFP_KERNEL);
1096 if (!rk_pcie->ob_window_map)
1097 return -ENOMEM;
1098
1099 addr = devm_kcalloc(dev, rk_pcie->num_ob_windows, sizeof(phys_addr_t),
1100 GFP_KERNEL);
1101 if (!addr)
1102 return -ENOMEM;
1103
1104 rk_pcie->outbound_addr = addr;
1105
1106 return 0;
1107 }
1108
/*
 * rk_pcie_msi_host_init() - intentional no-op MSI init.
 *
 * Installed as pp->ops->msi_host_init when the "msi" IRQ is absent so the
 * DWC core skips its built-in MSI setup and out-of-band MSI is used instead
 * (see rk_add_pcie_port()).
 */
static int rk_pcie_msi_host_init(struct pcie_port *pp)
{
	return 0;
}
1113
rk_pcie_msi_set_num_vectors(struct pcie_port * pp)1114 static void rk_pcie_msi_set_num_vectors(struct pcie_port *pp)
1115 {
1116 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
1117 struct rk_pcie *rk_pcie = to_rk_pcie(pci);
1118
1119 pp->num_vectors = rk_pcie->msi_vector_num;
1120 }
1121
rk_pcie_host_init(struct pcie_port * pp)1122 static int rk_pcie_host_init(struct pcie_port *pp)
1123 {
1124 int ret;
1125 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
1126
1127 dw_pcie_setup_rc(pp);
1128
1129 /* Disable BAR0 BAR1 */
1130 dw_pcie_writel_dbi(pci, PCIE_TYPE0_HDR_DBI2_OFFSET + 0x10 + BAR_0 * 4, 0);
1131 dw_pcie_writel_dbi(pci, PCIE_TYPE0_HDR_DBI2_OFFSET + 0x10 + BAR_1 * 4, 0);
1132
1133 ret = rk_pcie_establish_link(pci);
1134
1135 if (pp->msi_irq > 0)
1136 dw_pcie_msi_init(pp);
1137
1138 return ret;
1139 }
1140
/*
 * Deliberately non-const: rk_add_pcie_port() patches in .msi_host_init or
 * .set_num_vectors at probe time depending on whether an "msi" IRQ exists.
 */
static struct dw_pcie_host_ops rk_pcie_host_ops = {
	.host_init = rk_pcie_host_init,
};
1144
rk_add_pcie_port(struct rk_pcie * rk_pcie,struct platform_device * pdev)1145 static int rk_add_pcie_port(struct rk_pcie *rk_pcie, struct platform_device *pdev)
1146 {
1147 int ret;
1148 struct dw_pcie *pci = rk_pcie->pci;
1149 struct pcie_port *pp = &pci->pp;
1150 struct device *dev = pci->dev;
1151
1152 if (IS_ENABLED(CONFIG_PCI_MSI)) {
1153 pp->msi_irq = platform_get_irq_byname(pdev, "msi");
1154 /* If msi_irq is invalid, use outband msi routine */
1155 if (pp->msi_irq < 0) {
1156 dev_info(dev, "use outband MSI support");
1157 rk_pcie_host_ops.msi_host_init = rk_pcie_msi_host_init;
1158 } else {
1159 dev_info(dev, "max MSI vector is %d\n", rk_pcie->msi_vector_num);
1160 rk_pcie_host_ops.set_num_vectors = rk_pcie_msi_set_num_vectors;
1161 }
1162 }
1163
1164 pp->ops = &rk_pcie_host_ops;
1165
1166 ret = dw_pcie_host_init(pp);
1167 if (ret) {
1168 dev_err(dev, "failed to initialize host\n");
1169 return ret;
1170 }
1171
1172 return 0;
1173 }
1174
/*
 * rk_pcie_add_ep() - bring the controller up in endpoint mode.
 *
 * Reads the reserved "memory-region" that backs the EP BARs, parses the
 * iATU window layout, initializes the ATU, programs the EP config space
 * (rk_pcie_ep_setup) and starts link training.
 *
 * Returns 0 on success or a negative errno.
 */
static int rk_pcie_add_ep(struct rk_pcie *rk_pcie)
{
	int ret;
	struct device *dev = rk_pcie->pci->dev;
	struct device_node *np = dev->of_node;
	struct device_node *mem;
	struct resource reg;

	mem = of_parse_phandle(np, "memory-region", 0);
	if (!mem) {
		dev_err(dev, "missing \"memory-region\" property\n");
		return -ENODEV;
	}

	ret = of_address_to_resource(mem, 0, &reg);
	if (ret < 0) {
		dev_err(dev, "missing \"reg\" property\n");
		return ret;
	}

	/* Reserved-memory window backing the endpoint BARs / shadow BAR0 */
	rk_pcie->mem_start = reg.start;
	rk_pcie->mem_size = resource_size(&reg);

	ret = rk_pcie_ep_win_parse(rk_pcie);
	if (ret) {
		dev_err(dev, "failed to parse ep dts\n");
		return ret;
	}

	/* DBI2 (shadow) and unrolled-ATU spaces are offsets into the DBI window */
	rk_pcie->pci->dbi_base2 = rk_pcie->pci->dbi_base + PCIE_TYPE0_HDR_DBI2_OFFSET;
	rk_pcie->pci->atu_base = rk_pcie->pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
	rk_pcie->pci->iatu_unroll_enabled = rk_pcie_iatu_unroll_enabled(rk_pcie->pci);

	ret = rk_pcie_ep_atu_init(rk_pcie);
	if (ret) {
		dev_err(dev, "failed to init ep device\n");
		return ret;
	}

	rk_pcie_ep_setup(rk_pcie);

	ret = rk_pcie_establish_link(rk_pcie->pci);
	if (ret) {
		dev_err(dev, "failed to establish pcie link\n");
		return ret;
	}

	/*
	 * NOTE(review): both branches below return 0, so the udma check is
	 * currently a no-op — it looks like udma-dependent setup code was
	 * removed (or lives elsewhere). Confirm against the full driver
	 * before simplifying.
	 */
	if (!rk_pcie_udma_enabled(rk_pcie))
		return 0;

	return 0;
}
1227
rk_pcie_clk_init(struct rk_pcie * rk_pcie)1228 static int rk_pcie_clk_init(struct rk_pcie *rk_pcie)
1229 {
1230 struct device *dev = rk_pcie->pci->dev;
1231 int ret;
1232
1233 rk_pcie->clk_cnt = devm_clk_bulk_get_all(dev, &rk_pcie->clks);
1234 if (rk_pcie->clk_cnt < 1)
1235 return -ENODEV;
1236
1237 ret = clk_bulk_prepare_enable(rk_pcie->clk_cnt, rk_pcie->clks);
1238 if (ret) {
1239 dev_err(dev, "failed to prepare enable pcie bulk clks: %d\n", ret);
1240 return ret;
1241 }
1242
1243 return 0;
1244 }
1245
/*
 * rk_pcie_resource_get() - map MMIO regions and fetch probe-time GPIOs/props.
 *
 * Maps the "pcie-dbi" (DWC config space) and "pcie-apb" (Rockchip glue)
 * regions, requests the optional PERST# reset GPIO (driven low at request
 * time, see comment below), reads the PERST inactive delay, and probes the
 * optional presence-detect GPIO. Returns 0 or a negative errno.
 */
static int rk_pcie_resource_get(struct platform_device *pdev,
				struct rk_pcie *rk_pcie)
{
	struct resource *dbi_base;
	struct resource *apb_base;

	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"pcie-dbi");
	if (!dbi_base) {
		dev_err(&pdev->dev, "get pcie-dbi failed\n");
		return -ENODEV;
	}

	rk_pcie->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
	if (IS_ERR(rk_pcie->dbi_base))
		return PTR_ERR(rk_pcie->dbi_base);

	rk_pcie->pci->dbi_base = rk_pcie->dbi_base;

	apb_base = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"pcie-apb");
	if (!apb_base) {
		dev_err(&pdev->dev, "get pcie-apb failed\n");
		return -ENODEV;
	}
	rk_pcie->apb_base = devm_ioremap_resource(&pdev->dev, apb_base);
	if (IS_ERR(rk_pcie->apb_base))
		return PTR_ERR(rk_pcie->apb_base);

	/*
	 * Reset the device before enabling power because some of the
	 * platforms may use external refclk input with the same power
	 * rail connected to a 100MHz OSC chip. So once the power is up for
	 * the slot the refclk is available too, which doesn't quite follow
	 * the spec. We should make sure it is in reset state before
	 * everything's ready.
	 */
	rk_pcie->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
						    GPIOD_OUT_LOW);
	if (IS_ERR(rk_pcie->rst_gpio)) {
		dev_err(&pdev->dev, "invalid reset-gpios property in node\n");
		return PTR_ERR(rk_pcie->rst_gpio);
	}

	/* How long PERST# stays deasserted before link training; DT override */
	if (device_property_read_u32(&pdev->dev, "rockchip,perst-inactive-ms",
				     &rk_pcie->perst_inactive_ms))
		rk_pcie->perst_inactive_ms = 200;

	/* Presence detect is optional; a missing GPIO is only informational */
	rk_pcie->prsnt_gpio = devm_gpiod_get_optional(&pdev->dev, "prsnt", GPIOD_IN);
	if (IS_ERR_OR_NULL(rk_pcie->prsnt_gpio))
		dev_info(&pdev->dev, "invalid prsnt-gpios property in node\n");

	return 0;
}
1300
rk_pcie_phy_init(struct rk_pcie * rk_pcie)1301 static int rk_pcie_phy_init(struct rk_pcie *rk_pcie)
1302 {
1303 int ret;
1304 struct device *dev = rk_pcie->pci->dev;
1305
1306 rk_pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
1307 if (IS_ERR(rk_pcie->phy)) {
1308 if (PTR_ERR(rk_pcie->phy) != -EPROBE_DEFER)
1309 dev_info(dev, "missing phy\n");
1310 return PTR_ERR(rk_pcie->phy);
1311 }
1312
1313 switch (rk_pcie->mode) {
1314 case RK_PCIE_RC_TYPE:
1315 rk_pcie->phy_mode = PHY_MODE_PCIE; /* make no sense */
1316 rk_pcie->phy_sub_mode = PHY_MODE_PCIE_RC;
1317 break;
1318 case RK_PCIE_EP_TYPE:
1319 rk_pcie->phy_mode = PHY_MODE_PCIE;
1320 rk_pcie->phy_sub_mode = PHY_MODE_PCIE_EP;
1321 break;
1322 }
1323
1324 ret = phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
1325 rk_pcie->phy_sub_mode);
1326 if (ret) {
1327 dev_err(dev, "fail to set phy to mode %s, err %d\n",
1328 (rk_pcie->phy_sub_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
1329 ret);
1330 return ret;
1331 }
1332
1333 if (rk_pcie->bifurcation)
1334 phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
1335 PHY_MODE_PCIE_BIFURCATION);
1336
1337 ret = phy_init(rk_pcie->phy);
1338 if (ret < 0) {
1339 dev_err(dev, "fail to init phy, err %d\n", ret);
1340 return ret;
1341 }
1342
1343 phy_power_on(rk_pcie->phy);
1344
1345 return 0;
1346 }
1347
/*
 * rk_pcie_reset_grant_ctrl() - grant or revoke link reset via the
 * USB/PCIe GRF. Bit 18 is the write-enable mask for bit 2.
 * Returns the regmap_write() result.
 */
static int rk_pcie_reset_grant_ctrl(struct rk_pcie *rk_pcie,
				    bool enable)
{
	u32 val = BIT(18); /* Write mask bit */

	if (enable)
		val |= BIT(2);

	return regmap_write(rk_pcie->usb_pcie_grf, 0x0, val);
}
1360
/*
 * rk_pcie_start_dma_rd() - kick one read-channel (bus -> local) eDMA
 * transfer described by @cur.
 *
 * @ctr_off is the per-channel context base (PCIE_DMA_OFFSET + chn * 0x200,
 * see rk_pcie_start_dma_dwc()). The global enable register is programmed
 * first and the doorbell write last — the doorbell is what actually starts
 * the transfer, so the ordering here is load-bearing.
 */
static void rk_pcie_start_dma_rd(struct dma_trx_obj *obj, struct dma_table *cur, int ctr_off)
{
	struct rk_pcie *rk_pcie = dev_get_drvdata(obj->dev);

	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_ENB,
			   cur->enb.asdword);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_CTRL_LO,
			   cur->ctx_reg.ctrllo.asdword);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_CTRL_HI,
			   cur->ctx_reg.ctrlhi.asdword);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_XFERSIZE,
			   cur->ctx_reg.xfersize);
	/* 64-bit source (remote/bus) and destination (local) addresses */
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_SAR_PTR_LO,
			   cur->ctx_reg.sarptrlo);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_SAR_PTR_HI,
			   cur->ctx_reg.sarptrhi);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_DAR_PTR_LO,
			   cur->ctx_reg.darptrlo);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_RD_DAR_PTR_HI,
			   cur->ctx_reg.darptrhi);
	/* Ring the doorbell last: this starts the channel */
	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_DOORBELL,
			   cur->start.asdword);
}
1384
/*
 * rk_pcie_start_dma_wr() - kick one write-channel (local -> bus) eDMA
 * transfer described by @cur.
 *
 * Mirrors rk_pcie_start_dma_rd() with the additional channel-weight
 * register; the doorbell write must remain last as it starts the transfer.
 */
static void rk_pcie_start_dma_wr(struct dma_trx_obj *obj, struct dma_table *cur, int ctr_off)
{
	struct rk_pcie *rk_pcie = dev_get_drvdata(obj->dev);

	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_ENB,
			   cur->enb.asdword);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_CTRL_LO,
			   cur->ctx_reg.ctrllo.asdword);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_CTRL_HI,
			   cur->ctx_reg.ctrlhi.asdword);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_XFERSIZE,
			   cur->ctx_reg.xfersize);
	/* 64-bit source (local) and destination (remote/bus) addresses */
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_SAR_PTR_LO,
			   cur->ctx_reg.sarptrlo);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_SAR_PTR_HI,
			   cur->ctx_reg.sarptrhi);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_DAR_PTR_LO,
			   cur->ctx_reg.darptrlo);
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_DAR_PTR_HI,
			   cur->ctx_reg.darptrhi);
	/* Channel arbitration weight (low word) */
	dw_pcie_writel_dbi(rk_pcie->pci, ctr_off + PCIE_DMA_WR_WEILO,
			   cur->weilo.asdword);
	/* Ring the doorbell last: this starts the channel */
	dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_DOORBELL,
			   cur->start.asdword);
}
1410
rk_pcie_start_dma_dwc(struct dma_trx_obj * obj,struct dma_table * table)1411 static void rk_pcie_start_dma_dwc(struct dma_trx_obj *obj, struct dma_table *table)
1412 {
1413 int dir = table->dir;
1414 int chn = table->chn;
1415
1416 int ctr_off = PCIE_DMA_OFFSET + chn * 0x200;
1417
1418 if (dir == DMA_FROM_BUS)
1419 rk_pcie_start_dma_rd(obj, table, ctr_off);
1420 else if (dir == DMA_TO_BUS)
1421 rk_pcie_start_dma_wr(obj, table, ctr_off);
1422 }
1423
rk_pcie_config_dma_dwc(struct dma_table * table)1424 static void rk_pcie_config_dma_dwc(struct dma_table *table)
1425 {
1426 table->enb.enb = 0x1;
1427 table->ctx_reg.ctrllo.lie = 0x1;
1428 table->ctx_reg.ctrllo.rie = 0x0;
1429 table->ctx_reg.ctrllo.td = 0x1;
1430 table->ctx_reg.ctrlhi.asdword = 0x0;
1431 table->ctx_reg.xfersize = table->buf_size;
1432 if (table->dir == DMA_FROM_BUS) {
1433 table->ctx_reg.sarptrlo = (u32)(table->bus & 0xffffffff);
1434 table->ctx_reg.sarptrhi = (u32)(table->bus >> 32);
1435 table->ctx_reg.darptrlo = (u32)(table->local & 0xffffffff);
1436 table->ctx_reg.darptrhi = (u32)(table->local >> 32);
1437 } else if (table->dir == DMA_TO_BUS) {
1438 table->ctx_reg.sarptrlo = (u32)(table->local & 0xffffffff);
1439 table->ctx_reg.sarptrhi = (u32)(table->local >> 32);
1440 table->ctx_reg.darptrlo = (u32)(table->bus & 0xffffffff);
1441 table->ctx_reg.darptrhi = (u32)(table->bus >> 32);
1442 }
1443 table->weilo.weight0 = 0x0;
1444 table->start.stop = 0x0;
1445 table->start.chnl = table->chn;
1446 }
1447
/*
 * rk_pcie_hot_rst_work() - deferred handler for a hot-reset event
 * (queued from rk_pcie_sys_irq_handler() when the misc-status bit fires).
 *
 * Restores the command register, then — if the LTSSM delay-2 handshake is
 * enabled — waits for the LTSSM to drop back to the "detect quiet" family
 * of states (low 6 bits == 0) before acknowledging the delay.
 */
static void rk_pcie_hot_rst_work(struct work_struct *work)
{
	struct rk_pcie *rk_pcie = container_of(work, struct rk_pcie, hot_rst_work);
	u32 val, status;
	int ret;

	/* Setup command register */
	val = dw_pcie_readl_dbi(rk_pcie->pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
	       PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(rk_pcie->pci, PCI_COMMAND, val);

	if (rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_HOT_RESET_CTRL) & PCIE_LTSSM_APP_DLY2_EN) {
		/* Poll every 100us up to RK_PCIE_HOTRESET_TMOUT_US */
		ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_LTSSM_STATUS,
					 status, ((status & 0x3F) == 0), 100, RK_PCIE_HOTRESET_TMOUT_US);
		if (ret)
			dev_err(rk_pcie->pci->dev, "wait for detect quiet failed!\n");

		/* Upper half-word is the write-enable mask for the ack bit */
		rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_HOT_RESET_CTRL,
				   (PCIE_LTSSM_APP_DLY2_DONE) | ((PCIE_LTSSM_APP_DLY2_DONE) << 16));
	}
}
1471
rk_pcie_sys_irq_handler(int irq,void * arg)1472 static irqreturn_t rk_pcie_sys_irq_handler(int irq, void *arg)
1473 {
1474 struct rk_pcie *rk_pcie = arg;
1475 u32 chn;
1476 union int_status status;
1477 union int_clear clears;
1478 u32 reg;
1479
1480 status.asdword = dw_pcie_readl_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
1481 PCIE_DMA_WR_INT_STATUS);
1482 for (chn = 0; chn < PCIE_DMA_CHANEL_MAX_NUM; chn++) {
1483 if (status.donesta & BIT(chn)) {
1484 clears.doneclr = 0x1 << chn;
1485 dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
1486 PCIE_DMA_WR_INT_CLEAR, clears.asdword);
1487 if (rk_pcie->dma_obj && rk_pcie->dma_obj->cb)
1488 rk_pcie->dma_obj->cb(rk_pcie->dma_obj, chn, DMA_TO_BUS);
1489 }
1490
1491 if (status.abortsta & BIT(chn)) {
1492 dev_err(rk_pcie->pci->dev, "%s, abort\n", __func__);
1493 clears.abortclr = 0x1 << chn;
1494 dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
1495 PCIE_DMA_WR_INT_CLEAR, clears.asdword);
1496 }
1497 }
1498
1499 status.asdword = dw_pcie_readl_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
1500 PCIE_DMA_RD_INT_STATUS);
1501 for (chn = 0; chn < PCIE_DMA_CHANEL_MAX_NUM; chn++) {
1502 if (status.donesta & BIT(chn)) {
1503 clears.doneclr = 0x1 << chn;
1504 dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
1505 PCIE_DMA_RD_INT_CLEAR, clears.asdword);
1506 if (rk_pcie->dma_obj && rk_pcie->dma_obj->cb)
1507 rk_pcie->dma_obj->cb(rk_pcie->dma_obj, chn, DMA_FROM_BUS);
1508 }
1509
1510 if (status.abortsta & BIT(chn)) {
1511 dev_err(rk_pcie->pci->dev, "%s, abort\n", __func__);
1512 clears.abortclr = 0x1 << chn;
1513 dw_pcie_writel_dbi(rk_pcie->pci, PCIE_DMA_OFFSET +
1514 PCIE_DMA_RD_INT_CLEAR, clears.asdword);
1515 }
1516 }
1517
1518 reg = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_INTR_STATUS_MISC);
1519 if (reg & BIT(2))
1520 queue_work(rk_pcie->hot_rst_wq, &rk_pcie->hot_rst_work);
1521
1522 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_STATUS_MISC, reg);
1523
1524 return IRQ_HANDLED;
1525 }
1526
rk_pcie_request_sys_irq(struct rk_pcie * rk_pcie,struct platform_device * pdev)1527 static int rk_pcie_request_sys_irq(struct rk_pcie *rk_pcie,
1528 struct platform_device *pdev)
1529 {
1530 int irq;
1531 int ret;
1532
1533 irq = platform_get_irq_byname(pdev, "sys");
1534 if (irq < 0) {
1535 dev_err(rk_pcie->pci->dev, "missing sys IRQ resource\n");
1536 return -EINVAL;
1537 }
1538
1539 ret = devm_request_irq(rk_pcie->pci->dev, irq, rk_pcie_sys_irq_handler,
1540 IRQF_SHARED, "pcie-sys", rk_pcie);
1541 if (ret) {
1542 dev_err(rk_pcie->pci->dev, "failed to request PCIe subsystem IRQ\n");
1543 return ret;
1544 }
1545
1546 return 0;
1547 }
1548
/* Per-compatible configuration: controller mode and MSI vector count. */
static const struct rk_pcie_of_data rk_pcie_rc_of_data = {
	.mode = RK_PCIE_RC_TYPE,
};

static const struct rk_pcie_of_data rk_pcie_ep_of_data = {
	.mode = RK_PCIE_EP_TYPE,
};

/* RK3528/RK3562 RCs advertise 8 in-band MSI vectors (see set_num_vectors) */
static const struct rk_pcie_of_data rk3528_pcie_rc_of_data = {
	.mode = RK_PCIE_RC_TYPE,
	.msi_vector_num = 8,
};

static const struct of_device_id rk_pcie_of_match[] = {
	{
		.compatible = "rockchip,rk1808-pcie",
		.data = &rk_pcie_rc_of_data,
	},
	{
		.compatible = "rockchip,rk1808-pcie-ep",
		.data = &rk_pcie_ep_of_data,
	},
	{
		.compatible = "rockchip,rk3528-pcie",
		.data = &rk3528_pcie_rc_of_data,
	},
	{
		.compatible = "rockchip,rk3562-pcie",
		.data = &rk3528_pcie_rc_of_data,
	},
	{
		.compatible = "rockchip,rk3568-pcie",
		.data = &rk_pcie_rc_of_data,
	},
	{
		.compatible = "rockchip,rk3568-pcie-ep",
		.data = &rk_pcie_ep_of_data,
	},
	{
		.compatible = "rockchip,rk3588-pcie",
		.data = &rk_pcie_rc_of_data,
	},
	{
		.compatible = "rockchip,rk3588-pcie-ep",
		.data = &rk_pcie_ep_of_data,
	},
	{},
};

MODULE_DEVICE_TABLE(of, rk_pcie_of_match);
1599
/* Callbacks the generic DWC core uses for link management. */
static const struct dw_pcie_ops dw_pcie_ops = {
	.start_link = rk_pcie_establish_link,
	.link_up = rk_pcie_link_up,
};
1604
/*
 * rk1808_pcie_fixup() - RK1808-only GRF setup.
 *
 * Looks up the USB/PCIe and PMU GRF regmaps, reroutes PERST# to
 * PCIe_PRSTNm0, and releases the link-reset grant. Returns 0 or a
 * negative errno. The GRF offsets/values below are hardware-specific
 * write-mask pairs (upper 16 bits enable writes to the lower 16).
 */
static int rk1808_pcie_fixup(struct rk_pcie *rk_pcie, struct device_node *np)
{
	int ret;
	struct device *dev = rk_pcie->pci->dev;

	rk_pcie->usb_pcie_grf = syscon_regmap_lookup_by_phandle(np,
						"rockchip,usbpciegrf");
	if (IS_ERR(rk_pcie->usb_pcie_grf)) {
		dev_err(dev, "failed to find usb_pcie_grf regmap\n");
		return PTR_ERR(rk_pcie->usb_pcie_grf);
	}

	rk_pcie->pmu_grf = syscon_regmap_lookup_by_phandle(np,
							   "rockchip,pmugrf");
	if (IS_ERR(rk_pcie->pmu_grf)) {
		dev_err(dev, "failed to find pmugrf regmap\n");
		return PTR_ERR(rk_pcie->pmu_grf);
	}

	/* Workaround for pcie, switch to PCIe_PRSTNm0 */
	ret = regmap_write(rk_pcie->pmu_grf, 0x100, 0x01000100);
	if (ret)
		return ret;

	ret = regmap_write(rk_pcie->pmu_grf, 0x0, 0x0c000000);
	if (ret)
		return ret;

	/* release link reset grant */
	ret = rk_pcie_reset_grant_ctrl(rk_pcie, true);
	return ret;
}
1637
rk_pcie_fast_link_setup(struct rk_pcie * rk_pcie)1638 static void rk_pcie_fast_link_setup(struct rk_pcie *rk_pcie)
1639 {
1640 u32 val;
1641
1642 /* LTSSM EN ctrl mode */
1643 val = rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_HOT_RESET_CTRL);
1644 val |= (PCIE_LTSSM_ENABLE_ENHANCE | PCIE_LTSSM_APP_DLY2_EN)
1645 | ((PCIE_LTSSM_APP_DLY2_EN | PCIE_LTSSM_ENABLE_ENHANCE) << 16);
1646 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_HOT_RESET_CTRL, val);
1647 }
1648
rk_pcie_legacy_irq_mask(struct irq_data * d)1649 static void rk_pcie_legacy_irq_mask(struct irq_data *d)
1650 {
1651 struct rk_pcie *rk_pcie = irq_data_get_irq_chip_data(d);
1652 unsigned long flags;
1653
1654 raw_spin_lock_irqsave(&rk_pcie->intx_lock, flags);
1655 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK_LEGACY,
1656 MASK_LEGACY_INT(d->hwirq));
1657 raw_spin_unlock_irqrestore(&rk_pcie->intx_lock, flags);
1658 }
1659
rk_pcie_legacy_irq_unmask(struct irq_data * d)1660 static void rk_pcie_legacy_irq_unmask(struct irq_data *d)
1661 {
1662 struct rk_pcie *rk_pcie = irq_data_get_irq_chip_data(d);
1663 unsigned long flags;
1664
1665 raw_spin_lock_irqsave(&rk_pcie->intx_lock, flags);
1666 rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK_LEGACY,
1667 UNMASK_LEGACY_INT(d->hwirq));
1668 raw_spin_unlock_irqrestore(&rk_pcie->intx_lock, flags);
1669 }
1670
/* irq_chip for the four legacy INTx lines; enable/disable alias (un)mask. */
static struct irq_chip rk_pcie_legacy_irq_chip = {
	.name		= "rk-pcie-legacy-int",
	.irq_enable	= rk_pcie_legacy_irq_unmask,
	.irq_disable	= rk_pcie_legacy_irq_mask,
	.irq_mask	= rk_pcie_legacy_irq_mask,
	.irq_unmask	= rk_pcie_legacy_irq_unmask,
	.flags		= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
};
1679
/*
 * rk_pcie_intx_map() - irq_domain .map: attach the legacy chip and the
 * rk_pcie instance (domain host_data) to a freshly mapped INTx virq.
 */
static int rk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			    irq_hw_number_t hwirq)
{
	struct rk_pcie *rk_pcie = domain->host_data;

	irq_set_chip_data(irq, rk_pcie);
	irq_set_chip_and_handler(irq, &rk_pcie_legacy_irq_chip,
				 handle_simple_irq);

	return 0;
}
1688
/* INTx domain only needs .map; unmapping uses the generic default. */
static const struct irq_domain_ops intx_domain_ops = {
	.map = rk_pcie_intx_map,
};
1692
rk_pcie_legacy_int_handler(struct irq_desc * desc)1693 static void rk_pcie_legacy_int_handler(struct irq_desc *desc)
1694 {
1695 struct irq_chip *chip = irq_desc_get_chip(desc);
1696 struct rk_pcie *rockchip = irq_desc_get_handler_data(desc);
1697 struct device *dev = rockchip->pci->dev;
1698 u32 reg;
1699 u32 hwirq;
1700 u32 virq;
1701
1702 chained_irq_enter(chip, desc);
1703
1704 reg = rk_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_LEGACY);
1705 reg = reg & 0xf;
1706
1707 while (reg) {
1708 hwirq = ffs(reg) - 1;
1709 reg &= ~BIT(hwirq);
1710
1711 virq = irq_find_mapping(rockchip->irq_domain, hwirq);
1712 if (virq)
1713 generic_handle_irq(virq);
1714 else
1715 dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
1716 }
1717
1718 chained_irq_exit(chip, desc);
1719 }
1720
rk_pcie_init_irq_domain(struct rk_pcie * rockchip)1721 static int rk_pcie_init_irq_domain(struct rk_pcie *rockchip)
1722 {
1723 struct device *dev = rockchip->pci->dev;
1724 struct device_node *intc = of_get_next_child(dev->of_node, NULL);
1725
1726 if (!intc) {
1727 dev_err(dev, "missing child interrupt-controller node\n");
1728 return -EINVAL;
1729 }
1730
1731 raw_spin_lock_init(&rockchip->intx_lock);
1732 rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
1733 &intx_domain_ops, rockchip);
1734 if (!rockchip->irq_domain) {
1735 dev_err(dev, "failed to get a INTx IRQ domain\n");
1736 return -EINVAL;
1737 }
1738
1739 return 0;
1740 }
1741
rk_pcie_enable_power(struct rk_pcie * rk_pcie)1742 static int rk_pcie_enable_power(struct rk_pcie *rk_pcie)
1743 {
1744 int ret = 0;
1745 struct device *dev = rk_pcie->pci->dev;
1746
1747 if (IS_ERR(rk_pcie->vpcie3v3))
1748 return ret;
1749
1750 ret = regulator_enable(rk_pcie->vpcie3v3);
1751 if (ret)
1752 dev_err(dev, "fail to enable vpcie3v3 regulator\n");
1753
1754 return ret;
1755 }
1756
rk_pcie_disable_power(struct rk_pcie * rk_pcie)1757 static int rk_pcie_disable_power(struct rk_pcie *rk_pcie)
1758 {
1759 int ret = 0;
1760 struct device *dev = rk_pcie->pci->dev;
1761
1762 if (IS_ERR(rk_pcie->vpcie3v3))
1763 return ret;
1764
1765 ret = regulator_disable(rk_pcie->vpcie3v3);
1766 if (ret)
1767 dev_err(dev, "fail to disable vpcie3v3 regulator\n");
1768
1769 return ret;
1770 }
1771
/*
 * RAS_DES_EVENT(ss, v) - select RAS DES event @v via the capability's
 * control register (cap_base + 8) and print its counter (cap_base + 0xc)
 * with label @ss.
 *
 * NOTE: non-hygienic by design — expands against `pcie`, `s` and
 * `cap_base` from the caller's scope (rockchip_pcie_rasdes_show()).
 */
#define RAS_DES_EVENT(ss, v) \
	do { \
		dw_pcie_writel_dbi(pcie->pci, cap_base + 8, v); \
		seq_printf(s, ss "0x%x\n", dw_pcie_readl_dbi(pcie->pci, cap_base + 0xc)); \
	} while (0)
1777
rockchip_pcie_rasdes_show(struct seq_file * s,void * unused)1778 static int rockchip_pcie_rasdes_show(struct seq_file *s, void *unused)
1779 {
1780 struct rk_pcie *pcie = s->private;
1781 int cap_base;
1782 u32 val = rk_pcie_readl_apb(pcie, PCIE_CLIENT_CDM_RASDES_TBA_INFO_CMN);
1783 char *pm;
1784
1785 if (val & BIT(6))
1786 pm = "In training";
1787 else if (val & BIT(5))
1788 pm = "L1.2";
1789 else if (val & BIT(4))
1790 pm = "L1.1";
1791 else if (val & BIT(3))
1792 pm = "L1";
1793 else if (val & BIT(2))
1794 pm = "L0";
1795 else if (val & 0x3)
1796 pm = (val == 0x3) ? "L0s" : (val & BIT(1) ? "RX L0s" : "TX L0s");
1797 else
1798 pm = "Invalid";
1799
1800 seq_printf(s, "Common event signal status: 0x%s\n", pm);
1801
1802 cap_base = dw_pcie_find_ext_capability(pcie->pci, PCI_EXT_CAP_ID_VNDR);
1803 if (!cap_base) {
1804 dev_err(pcie->pci->dev, "Not able to find RASDES CAP!\n");
1805 return 0;
1806 }
1807
1808 RAS_DES_EVENT("EBUF Overflow: ", 0);
1809 RAS_DES_EVENT("EBUF Under-run: ", 0x0010000);
1810 RAS_DES_EVENT("Decode Error: ", 0x0020000);
1811 RAS_DES_EVENT("Running Disparity Error: ", 0x0030000);
1812 RAS_DES_EVENT("SKP OS Parity Error: ", 0x0040000);
1813 RAS_DES_EVENT("SYNC Header Error: ", 0x0050000);
1814 RAS_DES_EVENT("CTL SKP OS Parity Error: ", 0x0060000);
1815 RAS_DES_EVENT("Detect EI Infer: ", 0x1050000);
1816 RAS_DES_EVENT("Receiver Error: ", 0x1060000);
1817 RAS_DES_EVENT("Rx Recovery Request: ", 0x1070000);
1818 RAS_DES_EVENT("N_FTS Timeout: ", 0x1080000);
1819 RAS_DES_EVENT("Framing Error: ", 0x1090000);
1820 RAS_DES_EVENT("Deskew Error: ", 0x10a0000);
1821 RAS_DES_EVENT("BAD TLP: ", 0x2000000);
1822 RAS_DES_EVENT("LCRC Error: ", 0x2010000);
1823 RAS_DES_EVENT("BAD DLLP: ", 0x2020000);
1824 RAS_DES_EVENT("Replay Number Rollover: ", 0x2030000);
1825 RAS_DES_EVENT("Replay Timeout: ", 0x2040000);
1826 RAS_DES_EVENT("Rx Nak DLLP: ", 0x2050000);
1827 RAS_DES_EVENT("Tx Nak DLLP: ", 0x2060000);
1828 RAS_DES_EVENT("Retry TLP: ", 0x2070000);
1829 RAS_DES_EVENT("FC Timeout: ", 0x3000000);
1830 RAS_DES_EVENT("Poisoned TLP: ", 0x3010000);
1831 RAS_DES_EVENT("ECRC Error: ", 0x3020000);
1832 RAS_DES_EVENT("Unsupported Request: ", 0x3030000);
1833 RAS_DES_EVENT("Completer Abort: ", 0x3040000);
1834 RAS_DES_EVENT("Completion Timeout: ", 0x3050000);
1835
1836 return 0;
1837 }
/*
 * rockchip_pcie_rasdes_open() - single_open wrapper passing the rk_pcie
 * instance (stored as inode private data by debugfs_create_file).
 */
static int rockchip_pcie_rasdes_open(struct inode *inode, struct file *file)
{
	return single_open(file, rockchip_pcie_rasdes_show,
			   inode->i_private);
}
1843
rockchip_pcie_rasdes_write(struct file * file,const char __user * ubuf,size_t count,loff_t * ppos)1844 static ssize_t rockchip_pcie_rasdes_write(struct file *file,
1845 const char __user *ubuf,
1846 size_t count, loff_t *ppos)
1847 {
1848 struct seq_file *s = file->private_data;
1849 struct rk_pcie *pcie = s->private;
1850 char buf[32];
1851 int cap_base;
1852
1853 if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
1854 return -EFAULT;
1855
1856 cap_base = dw_pcie_find_ext_capability(pcie->pci, PCI_EXT_CAP_ID_VNDR);
1857 if (!cap_base) {
1858 dev_err(pcie->pci->dev, "Not able to find RASDES CAP!\n");
1859 return 0;
1860 }
1861
1862 if (!strncmp(buf, "enable", 6)) {
1863 dev_info(pcie->pci->dev, "RAS DES Event: Enable ALL!\n");
1864 dw_pcie_writel_dbi(pcie->pci, cap_base + 8, 0x1c);
1865 dw_pcie_writel_dbi(pcie->pci, cap_base + 8, 0x3);
1866 } else if (!strncmp(buf, "disable", 7)) {
1867 dev_info(pcie->pci->dev, "RAS DES Event: disable ALL!\n");
1868 dw_pcie_writel_dbi(pcie->pci, cap_base + 8, 0x14);
1869 } else if (!strncmp(buf, "clear", 5)) {
1870 dev_info(pcie->pci->dev, "RAS DES Event: Clear ALL!\n");
1871 dw_pcie_writel_dbi(pcie->pci, cap_base + 8, 0x3);
1872 } else {
1873 dev_info(pcie->pci->dev, "Not support command!\n");
1874 }
1875
1876 return count;
1877 }
1878
1879 static const struct file_operations rockchip_pcie_rasdes_ops = {
1880 .owner = THIS_MODULE,
1881 .open = rockchip_pcie_rasdes_open,
1882 .read = seq_read,
1883 .write = rockchip_pcie_rasdes_write,
1884 };
1885
rockchip_pcie_fifo_show(struct seq_file * s,void * data)1886 static int rockchip_pcie_fifo_show(struct seq_file *s, void *data)
1887 {
1888 struct rk_pcie *pcie = (struct rk_pcie *)dev_get_drvdata(s->private);
1889 u32 loop;
1890
1891 seq_printf(s, "ltssm = 0x%x\n",
1892 rk_pcie_readl_apb(pcie, PCIE_CLIENT_LTSSM_STATUS));
1893 for (loop = 0; loop < 64; loop++)
1894 seq_printf(s, "fifo_status = 0x%x\n",
1895 rk_pcie_readl_apb(pcie, PCIE_CLIENT_DBG_FIFO_STATUS));
1896
1897 return 0;
1898 }
1899
/*
 * rockchip_pcie_debugfs_exit() - remove the driver's debugfs tree and
 * clear the pointer so a later exit/init cycle is safe.
 */
static void rockchip_pcie_debugfs_exit(struct rk_pcie *pcie)
{
	debugfs_remove_recursive(pcie->debugfs);
	pcie->debugfs = NULL;
}
1905
rockchip_pcie_debugfs_init(struct rk_pcie * pcie)1906 static int rockchip_pcie_debugfs_init(struct rk_pcie *pcie)
1907 {
1908 struct dentry *file;
1909
1910 pcie->debugfs = debugfs_create_dir(dev_name(pcie->pci->dev), NULL);
1911 if (!pcie->debugfs)
1912 return -ENOMEM;
1913
1914 debugfs_create_devm_seqfile(pcie->pci->dev, "dumpfifo",
1915 pcie->debugfs,
1916 rockchip_pcie_fifo_show);
1917 file = debugfs_create_file("err_event", 0644, pcie->debugfs,
1918 pcie, &rockchip_pcie_rasdes_ops);
1919 if (!file)
1920 goto remove;
1921
1922 return 0;
1923
1924 remove:
1925 rockchip_pcie_debugfs_exit(pcie);
1926
1927 return -ENOMEM;
1928 }
1929
/*
 * rk_pcie_really_probe - full controller bring-up.
 *
 * @p: the struct platform_device, passed as void * because this function
 *     may run as a kthread entry point (see rk_pcie_probe()).
 *
 * Performs resource acquisition, PHY/clock/reset setup, RC or EP
 * registration and optional DMA/debugfs extensions.  On failure the goto
 * ladder at the bottom unwinds exactly what was set up; in the threaded
 * configuration it additionally releases the driver binding, since no one
 * else can observe the asynchronous failure.
 */
static int rk_pcie_really_probe(void *p)
{
	struct platform_device *pdev = p;
	struct device *dev = &pdev->dev;
	struct rk_pcie *rk_pcie;
	struct dw_pcie *pci;
	int ret;
	const struct of_device_id *match;
	const struct rk_pcie_of_data *data;
	enum rk_pcie_device_mode mode;
	struct device_node *np = pdev->dev.of_node;
	u32 val = 0;	/* doubles as regulator retry counter, then as a scratch register value */
	int irq;

	match = of_match_device(rk_pcie_of_match, dev);
	if (!match) {
		ret = -EINVAL;
		goto release_driver;
	}

	data = (struct rk_pcie_of_data *)match->data;
	mode = (enum rk_pcie_device_mode)data->mode;

	rk_pcie = devm_kzalloc(dev, sizeof(*rk_pcie), GFP_KERNEL);
	if (!rk_pcie) {
		ret = -ENOMEM;
		goto release_driver;
	}

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci) {
		ret = -ENOMEM;
		goto release_driver;
	}

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	rk_pcie->mode = mode;
	rk_pcie->msi_vector_num = data->msi_vector_num;
	rk_pcie->pci = pci;

	/* rk1808 needs dedicated fixups later in the sequence */
	if (of_device_is_compatible(np, "rockchip,rk1808-pcie") ||
	    of_device_is_compatible(np, "rockchip,rk1808-pcie-ep"))
		rk_pcie->is_rk1808 = true;
	else
		rk_pcie->is_rk1808 = false;

	if (device_property_read_bool(dev, "rockchip,bifurcation"))
		rk_pcie->bifurcation = true;

	ret = rk_pcie_resource_get(pdev, rk_pcie);
	if (ret) {
		dev_err(dev, "resource init failed\n");
		goto release_driver;
	}

	/* An optional PRSNT# GPIO tells us whether a card is plugged at all */
	if (!IS_ERR_OR_NULL(rk_pcie->prsnt_gpio)) {
		if (!gpiod_get_value(rk_pcie->prsnt_gpio)) {
			ret = -ENODEV;
			goto release_driver;
		}
	}

	rk_pcie->supports_clkreq = device_property_read_bool(dev, "supports-clkreq");

retry_regulator:
	/* DON'T MOVE ME: must be enable before phy init */
	rk_pcie->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
	if (IS_ERR(rk_pcie->vpcie3v3)) {
		if (PTR_ERR(rk_pcie->vpcie3v3) != -ENODEV) {
			if (IS_ENABLED(CONFIG_PCIE_RK_THREADED_INIT)) {
				/*
				 * Deferred but in threaded context for most 10s
				 * (500 retries * 20 ms); plain -EPROBE_DEFER is
				 * not available from a kthread.
				 */
				msleep(20);
				if (++val < 500)
					goto retry_regulator;
			}

			ret = PTR_ERR(rk_pcie->vpcie3v3);
			goto release_driver;
		}

		dev_info(dev, "no vpcie3v3 regulator found\n");
	}

	ret = rk_pcie_enable_power(rk_pcie);
	if (ret)
		goto release_driver;

	ret = rk_pcie_phy_init(rk_pcie);
	if (ret) {
		dev_err(dev, "phy init failed\n");
		goto disable_vpcie3v3;
	}

	rk_pcie->rsts = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(rk_pcie->rsts)) {
		ret = PTR_ERR(rk_pcie->rsts);
		dev_err(dev, "failed to get reset lines\n");
		goto disable_phy;
	}

	reset_control_deassert(rk_pcie->rsts);

	ret = rk_pcie_request_sys_irq(rk_pcie, pdev);
	if (ret) {
		dev_err(dev, "pcie irq init failed\n");
		goto disable_phy;
	}

	platform_set_drvdata(pdev, rk_pcie);

	ret = rk_pcie_clk_init(rk_pcie);
	if (ret) {
		dev_err(dev, "clock init failed\n");
		goto disable_phy;
	}

	/* DBI registers stay writable until the very end of the probe */
	dw_pcie_dbi_ro_wr_en(pci);

	if (rk_pcie->is_rk1808) {
		ret = rk1808_pcie_fixup(rk_pcie, np);
		if (ret)
			goto deinit_clk;
	} else {
		rk_pcie_fast_link_setup(rk_pcie);
	}

	/* Legacy interrupt is optional */
	ret = rk_pcie_init_irq_domain(rk_pcie);
	if (!ret) {
		irq = platform_get_irq_byname(pdev, "legacy");
		if (irq >= 0) {
			irq_set_chained_handler_and_data(irq, rk_pcie_legacy_int_handler,
							 rk_pcie);
			/* Unmask all legacy interrupt from INTA~INTD */
			rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_MASK_LEGACY,
					   UNMASK_ALL_LEGACY_INT);
		} else {
			dev_info(dev, "missing legacy IRQ resource\n");
		}
	}

	/* Set PCIe mode */
	rk_pcie_set_mode(rk_pcie);

	/* Force into loopback master mode */
	if (device_property_read_bool(dev, "rockchip,lpbk-master")) {
		val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
		val |= PORT_LINK_LPBK_ENABLE;
		dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
		rk_pcie->is_signal_test = true;
	}

	/* Force into compliance mode */
	if (device_property_read_bool(dev, "rockchip,compliance-mode")) {
		val = dw_pcie_readl_dbi(pci, PCIE_CAP_LINK_CONTROL2_LINK_STATUS);
		val |= BIT(4);	/* Enter Compliance bit in Link Control 2 */
		dw_pcie_writel_dbi(pci, PCIE_CAP_LINK_CONTROL2_LINK_STATUS, val);
		rk_pcie->is_signal_test = true;
	}

	/* Skip waiting for training to pass in system PM routine */
	if (device_property_read_bool(dev, "rockchip,skip-scan-in-resume"))
		rk_pcie->skip_scan_in_resume = true;

	rk_pcie->hot_rst_wq = create_singlethread_workqueue("rk_pcie_hot_rst_wq");
	if (!rk_pcie->hot_rst_wq) {
		dev_err(dev, "failed to create hot_rst workqueue\n");
		ret = -ENOMEM;
		goto remove_irq_domain;
	}
	INIT_WORK(&rk_pcie->hot_rst_work, rk_pcie_hot_rst_work);

	switch (rk_pcie->mode) {
	case RK_PCIE_RC_TYPE:
		ret = rk_add_pcie_port(rk_pcie, pdev);
		break;
	case RK_PCIE_EP_TYPE:
		ret = rk_pcie_add_ep(rk_pcie);
		break;
	}

	/*
	 * In signal-test (loopback/compliance) mode the link intentionally
	 * never trains; bail out here and keep the hardware powered for
	 * measurement, ignoring the port/ep registration result.
	 */
	if (rk_pcie->is_signal_test == true)
		return 0;

	if (ret)
		goto remove_rst_wq;

	ret = rk_pcie_init_dma_trx(rk_pcie);
	if (ret) {
		dev_err(dev, "failed to add dma extension\n");
		goto remove_rst_wq;
	}

	if (rk_pcie->dma_obj) {
		rk_pcie->dma_obj->start_dma_func = rk_pcie_start_dma_dwc;
		rk_pcie->dma_obj->config_dma_func = rk_pcie_config_dma_dwc;
	}

	if (rk_pcie->is_rk1808) {
		/* hold link reset grant after link-up */
		ret = rk_pcie_reset_grant_ctrl(rk_pcie, false);
		if (ret)
			goto remove_rst_wq;
	}

	dw_pcie_dbi_ro_wr_dis(pci);

	device_init_wakeup(dev, true);

	/* Enable async system PM for multiports SoC */
	device_enable_async_suspend(dev);

	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		/* debugfs trouble is not fatal: log and carry on */
		ret = rockchip_pcie_debugfs_init(rk_pcie);
		if (ret < 0)
			dev_err(dev, "failed to setup debugfs: %d\n", ret);

		/* Enable RASDES Error event by default */
		val = dw_pcie_find_ext_capability(rk_pcie->pci, PCI_EXT_CAP_ID_VNDR);
		if (!val) {
			dev_err(dev, "Not able to find RASDES CAP!\n");
			return 0;
		}

		/*
		 * Writes to the RASDES event-counter control register
		 * (cap + 8); 0x1c then 0x3 presumably clears and enables
		 * the counters — TODO confirm against the DWC databook.
		 */
		dw_pcie_writel_dbi(rk_pcie->pci, val + 8, 0x1c);
		dw_pcie_writel_dbi(rk_pcie->pci, val + 8, 0x3);
	}

	return 0;

remove_rst_wq:
	destroy_workqueue(rk_pcie->hot_rst_wq);
remove_irq_domain:
	if (rk_pcie->irq_domain)
		irq_domain_remove(rk_pcie->irq_domain);
disable_phy:
	phy_power_off(rk_pcie->phy);
	phy_exit(rk_pcie->phy);
deinit_clk:
	clk_bulk_disable_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);
disable_vpcie3v3:
	rk_pcie_disable_power(rk_pcie);
release_driver:
	/* Threaded probe: nobody sees our return value, so unbind ourselves */
	if (IS_ENABLED(CONFIG_PCIE_RK_THREADED_INIT))
		device_release_driver(dev);

	return ret;
}
2180
rk_pcie_probe(struct platform_device * pdev)2181 static int rk_pcie_probe(struct platform_device *pdev)
2182 {
2183 if (IS_ENABLED(CONFIG_PCIE_RK_THREADED_INIT)) {
2184 struct task_struct *tsk;
2185
2186 tsk = kthread_run(rk_pcie_really_probe, pdev, "rk-pcie");
2187 if (IS_ERR(tsk)) {
2188 dev_err(&pdev->dev, "start rk-pcie thread failed\n");
2189 return PTR_ERR(tsk);
2190 }
2191
2192 return 0;
2193 }
2194
2195 return rk_pcie_really_probe(pdev);
2196 }
2197
2198 #ifdef CONFIG_PCIEASPM
/*
 * rk_pcie_downstream_dev_to_d0 - force downstream devices into D0 and
 * save (enable == false) or restore (enable == true) the link's ASPM and
 * L1SS configuration around a system sleep transition.
 *
 * Fixes versus the previous revision:
 *  - When disabling, the code cleared PCI_EXP_LNKCAP_ASPM_* bits (Link
 *    Capabilities bit positions) in the Link Control register, which does
 *    not touch the ASPM Control field at all; clear PCI_EXP_LNKCTL_ASPMC
 *    (LNKCTL bits 1:0) instead.
 *  - The error message claimed a D3hot transition while the code requests
 *    D0.
 */
static void rk_pcie_downstream_dev_to_d0(struct rk_pcie *rk_pcie, bool enable)
{
	struct pcie_port *pp = &rk_pcie->pci->pp;
	struct pci_bus *child, *root_bus = NULL;
	struct pci_dev *pdev, *bridge;
	u32 val;

	/* Locate the root bus directly below the host bridge */
	list_for_each_entry(child, &pp->bridge->bus->children, node) {
		if (child->parent == pp->bridge->bus) {
			root_bus = child;
			bridge = root_bus->self;
			break;
		}
	}

	if (!root_bus) {
		dev_err(rk_pcie->pci->dev, "Failed to find downstream devices\n");
		return;
	}

	/* Save and restore root bus ASPM */
	if (enable) {
		if (rk_pcie->l1ss_ctl1)
			dw_pcie_writel_dbi(rk_pcie->pci, bridge->l1ss + PCI_L1SS_CTL1, rk_pcie->l1ss_ctl1);

		/* rk_pcie->aspm was saved when called with enable == false */
		dw_pcie_writel_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL, rk_pcie->aspm);
	} else {
		val = dw_pcie_readl_dbi(rk_pcie->pci, bridge->l1ss + PCI_L1SS_CTL1);
		if (val & PCI_L1SS_CTL1_L1SS_MASK)
			rk_pcie->l1ss_ctl1 = val;
		else
			rk_pcie->l1ss_ctl1 = 0;

		val = dw_pcie_readl_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL);
		rk_pcie->aspm = val & PCI_EXP_LNKCTL_ASPMC;
		/* Clear the ASPM Control field (LNKCTL bits 1:0) to disable ASPM */
		val &= ~PCI_EXP_LNKCTL_ASPMC;
		dw_pcie_writel_dbi(rk_pcie->pci, bridge->pcie_cap + PCI_EXP_LNKCTL, val);
	}

	list_for_each_entry(pdev, &root_bus->devices, bus_list) {
		if (PCI_SLOT(pdev->devfn) == 0) {
			if (pci_set_power_state(pdev, PCI_D0))
				dev_err(rk_pcie->pci->dev,
					"Failed to transition %s to D0 state\n",
					dev_name(&pdev->dev));
			if (enable) {
				/* Restore the endpoint's saved L1SS settings */
				if (rk_pcie->l1ss_ctl1) {
					pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, &val);
					val &= ~PCI_L1SS_CTL1_L1SS_MASK;
					val |= (rk_pcie->l1ss_ctl1 & PCI_L1SS_CTL1_L1SS_MASK);
					pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, val);
				}

				pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
								   PCI_EXP_LNKCTL_ASPMC, rk_pcie->aspm);
			} else {
				pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
			}
		}
	}
}
2262 #endif
2263
/*
 * System suspend (noirq phase): negotiate L2/L3 Ready entry with the
 * downstream component, then power off the PHY, clocks and 3v3 rail.
 * The step ordering mirrors the spec sequence quoted below and must not
 * be changed.
 */
static int __maybe_unused rockchip_dw_pcie_suspend(struct device *dev)
{
	struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
	int ret = 0, power;
	struct dw_pcie *pci = rk_pcie->pci;
	u32 status;

	/*
	 * This is as per PCI Express Base r5.0 r1.0 May 22-2019,
	 * 5.2 Link State Power Management (Page #440).
	 *
	 * L2/L3 Ready entry negotiations happen while in the L0 state.
	 * L2/L3 Ready are entered only after the negotiation completes.
	 *
	 * The following example sequence illustrates the multi-step Link state
	 * transition process leading up to entering a system sleep state:
	 * 1. System software directs all Functions of a Downstream component to D3Hot.
	 * 2. The Downstream component then initiates the transition of the Link to L1
	 *    as required.
	 * 3. System software then causes the Root Complex to broadcast the PME_Turn_Off
	 *    Message in preparation for removing the main power source.
	 * 4. This Message causes the subject Link to transition back to L0 in order to
	 *    send it and to enable the Downstream component to respond with PME_TO_Ack.
	 * 5. After sending the PME_TO_Ack, the Downstream component initiates the L2/L3
	 *    Ready transition protocol.
	 */

	/* 1. All sub-devices are in D3hot by PCIe stack */
	dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);

	rk_pcie_link_status_clear(rk_pcie);

	/*
	 * Wlan devices will be shutdown from function driver now, so doing L2 here
	 * must fail. Skip L2 routine.
	 */
	if (rk_pcie->skip_scan_in_resume) {
		rfkill_get_wifi_power_state(&power);
		if (!power)
			goto no_l2;
	}

	/* 2. Broadcast PME_Turn_Off Message */
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_MSG_GEN, PME_TURN_OFF);
	/* BIT(4) presumably self-clears once the message is sent — TODO confirm */
	ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_MSG_GEN,
				 status, !(status & BIT(4)), 20, RK_PCIE_L2_TMOUT_US);
	if (ret) {
		dev_err(dev, "Failed to send PME_Turn_Off\n");
		goto no_l2;
	}

	/* 3. Wait for PME_TO_Ack */
	ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_INTR_STATUS_MSG_RX,
				 status, status & BIT(9), 20, RK_PCIE_L2_TMOUT_US);
	if (ret) {
		dev_err(dev, "Failed to receive PME_TO_Ack\n");
		goto no_l2;
	}

	/* 4. Clear PME_TO_Ack and Wait for ready to enter L23 message */
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_INTR_STATUS_MSG_RX, PME_TO_ACK);
	ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_POWER,
				 status, status & READY_ENTER_L23, 20, RK_PCIE_L2_TMOUT_US);
	if (ret) {
		dev_err(dev, "Failed to ready to enter L23\n");
		goto no_l2;
	}

	/* 5. Check we are in L2 */
	ret = readl_poll_timeout(rk_pcie->apb_base + PCIE_CLIENT_LTSSM_STATUS,
				 status, ((status & S_MAX) == S_L2_IDLE), 20, RK_PCIE_L2_TMOUT_US);
	if (ret)
		dev_err(pci->dev, "Link isn't in L2 idle!\n");

no_l2:
	/* Power-down proceeds even if the L2 handshake failed */
	rk_pcie_disable_ltssm(rk_pcie);

	/* make sure assert phy success */
	usleep_range(200, 300);

	phy_power_off(rk_pcie->phy);
	phy_exit(rk_pcie->phy);

	clk_bulk_disable_unprepare(rk_pcie->clk_cnt, rk_pcie->clks);

	rk_pcie->in_suspend = true;

	/* Assert PERST# before cutting the slot's 3v3 supply */
	gpiod_set_value_cansleep(rk_pcie->rst_gpio, 0);
	ret = rk_pcie_disable_power(rk_pcie);

	return ret;
}
2356
/*
 * System resume (noirq phase): re-power the controller, re-initialise the
 * PHY and clocks, retrain the link, and redo the RC or EP configuration
 * that was lost while the rail was off.  The order mirrors the probe-time
 * bring-up and must be preserved.
 */
static int __maybe_unused rockchip_dw_pcie_resume(struct device *dev)
{
	struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
	/* "standard RC": host mode without the vendor DMA extension */
	bool std_rc = rk_pcie->mode == RK_PCIE_RC_TYPE && !rk_pcie->dma_obj;
	int ret;

	reset_control_assert(rk_pcie->rsts);
	udelay(10);
	reset_control_deassert(rk_pcie->rsts);

	ret = rk_pcie_enable_power(rk_pcie);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(rk_pcie->clk_cnt, rk_pcie->clks);
	if (ret) {
		dev_err(dev, "failed to prepare enable pcie bulk clks: %d\n", ret);
		return ret;
	}

	/* Re-apply the RC/EP PHY mode selected at probe time */
	ret = phy_set_mode_ext(rk_pcie->phy, rk_pcie->phy_mode,
			       rk_pcie->phy_sub_mode);
	if (ret) {
		dev_err(dev, "fail to set phy to mode %s, err %d\n",
			(rk_pcie->phy_sub_mode == PHY_MODE_PCIE_RC) ? "RC" : "EP",
			ret);
		return ret;
	}

	ret = phy_init(rk_pcie->phy);
	if (ret < 0) {
		dev_err(dev, "fail to init phy, err %d\n", ret);
		return ret;
	}

	phy_power_on(rk_pcie->phy);

	/* DBI stays writable for the rest of the restore sequence */
	dw_pcie_dbi_ro_wr_en(rk_pcie->pci);

	if (rk_pcie->is_rk1808) {
		/* release link reset grant */
		ret = rk_pcie_reset_grant_ctrl(rk_pcie, true);
		if (ret)
			return ret;
	} else {
		rk_pcie_fast_link_setup(rk_pcie);
	}

	/* Set PCIe mode */
	rk_pcie_set_mode(rk_pcie);

	if (std_rc)
		dw_pcie_setup_rc(&rk_pcie->pci->pp);

	ret = rk_pcie_establish_link(rk_pcie->pci);
	if (ret) {
		dev_err(dev, "failed to establish pcie link\n");
		goto err;
	}

	if (std_rc)
		goto std_rc_done;

	/* EP / DMA modes need their ATU windows rebuilt after power loss */
	ret = rk_pcie_ep_atu_init(rk_pcie);
	if (ret) {
		dev_err(dev, "failed to init ep device\n");
		goto err;
	}

	rk_pcie_ep_setup(rk_pcie);

	rk_pcie->in_suspend = false;

std_rc_done:
	dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);
	/* hold link reset grant after link-up */
	if (rk_pcie->is_rk1808) {
		ret = rk_pcie_reset_grant_ctrl(rk_pcie, false);
		if (ret)
			goto err;
	}

	if (rk_pcie->pci->pp.msi_irq > 0)
		dw_pcie_msi_init(&rk_pcie->pci->pp);

	return 0;
err:
	rk_pcie_disable_power(rk_pcie);

	return ret;
}
2448
2449 #ifdef CONFIG_PCIEASPM
rockchip_dw_pcie_prepare(struct device * dev)2450 static int rockchip_dw_pcie_prepare(struct device *dev)
2451 {
2452 struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
2453
2454 dw_pcie_dbi_ro_wr_en(rk_pcie->pci);
2455 rk_pcie_downstream_dev_to_d0(rk_pcie, false);
2456 dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);
2457
2458 return 0;
2459 }
2460
rockchip_dw_pcie_complete(struct device * dev)2461 static void rockchip_dw_pcie_complete(struct device *dev)
2462 {
2463 struct rk_pcie *rk_pcie = dev_get_drvdata(dev);
2464
2465 dw_pcie_dbi_ro_wr_en(rk_pcie->pci);
2466 rk_pcie_downstream_dev_to_d0(rk_pcie, true);
2467 dw_pcie_dbi_ro_wr_dis(rk_pcie->pci);
2468 }
2469 #endif
2470
/*
 * Suspend/resume run in the noirq phase so the link is torn down after,
 * and brought back before, the child PCI devices' own PM callbacks.
 * prepare/complete are only useful when ASPM is compiled in.
 */
static const struct dev_pm_ops rockchip_dw_pcie_pm_ops = {
#ifdef CONFIG_PCIEASPM
	.prepare = rockchip_dw_pcie_prepare,
	.complete = rockchip_dw_pcie_complete,
#endif
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_dw_pcie_suspend,
				      rockchip_dw_pcie_resume)
};
2479
static struct platform_driver rk_plat_pcie_driver = {
	.driver = {
		.name = "rk-pcie",
		.of_match_table = rk_pcie_of_match,
		/* no .remove: forbid manual unbind via sysfs */
		.suppress_bind_attrs = true,
		.pm = &rockchip_dw_pcie_pm_ops,
	},
	.probe = rk_pcie_probe,
};

module_platform_driver(rk_plat_pcie_driver);

MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com>");
MODULE_DESCRIPTION("RockChip PCIe Controller driver");
MODULE_LICENSE("GPL v2");
2495