// SPDX-License-Identifier: GPL-2.0+
/*
 * Rockchip DesignWare based PCIe host controller driver
 *
 * Copyright (c) 2021 Rockchip, Inc.
 */

#include <common.h>
#include <clk.h>
#include <dm.h>
#include <generic-phy.h>
#include <pci.h>
#include <power-domain.h>
#include <power/regulator.h>
#include <reset.h>
#include <syscon.h>
#include <asm/io.h>
#include <asm-generic/gpio.h>
#include <asm/arch-rockchip/clock.h>
#include <linux/bitfield.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/log2.h>

DECLARE_GLOBAL_DATA_PTR;

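/* Set to 1 to compile in the LTSSM debug FIFO helpers further below */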
#define RK_PCIE_DBG 0

#define __pcie_dev_print_emit(fmt, ...) \
({ \
	printf(fmt, ##__VA_ARGS__); \
})

#ifdef dev_err
#undef dev_err
#define dev_err(dev, fmt, ...) \
({ \
	if (dev) \
		__pcie_dev_print_emit("%s: " fmt, dev->name, \
				      ##__VA_ARGS__); \
})
#endif

#ifdef dev_info
#undef dev_info
#define dev_info dev_err
#endif

#ifdef DEBUG
#define dev_dbg dev_err
#else
#define dev_dbg(dev, fmt, ...) \
({ \
	if (0) \
		__dev_printk(7, dev, fmt, ##__VA_ARGS__); \
})
#endif

struct rk_pcie {
	struct udevice *dev;
	struct udevice *vpcie3v3;
	void *dbi_base;
	void *apb_base;
	void *cfg_base;
	fdt_size_t cfg_size;
	struct phy phy;
	struct clk_bulk clks;
	int first_busno;
	struct reset_ctl_bulk rsts;
	struct gpio_desc rst_gpio;
	struct pci_region io;
	struct pci_region mem;
	struct pci_region mem64;
	bool is_bifurcation;
	u32 rasdes_off;
	u32 gen;
	u32 lanes;
};

enum {
	PCIBIOS_SUCCESSFUL = 0x0000,
	PCIBIOS_UNSUPPORTED = -ENODEV,
	PCIBIOS_NODEV = -ENODEV,
};

#define msleep(a) udelay((a) * 1000)
#define MAX_LINKUP_RETRIES 2

/* Rockchip PCIe client (APB) registers */
#define PCIE_CLIENT_GENERAL_DEBUG 0x104
#define PCIE_CLIENT_CDM_RASDES_TBA_INFO_CMN 0x154
#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
#define PCIE_CLIENT_LTSSM_STATUS 0x300
#define SMLH_LINKUP BIT(16)
#define RDLH_LINKUP BIT(17)
#define PCIE_CLIENT_DBG_FIFO_MODE_CON 0x310
#define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D0 0x320
#define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D1 0x324
#define PCIE_CLIENT_DBG_FIFO_TRN_HIT_D0 0x328
#define PCIE_CLIENT_DBG_FIFO_TRN_HIT_D1 0x32c
#define PCIE_CLIENT_DBG_FIFO_STATUS 0x350
#define PCIE_CLIENT_DBG_TRANSITION_DATA 0xffff0000
#define PCIE_CLIENT_DBF_EN 0xffff0003

/* PCI DBICS registers */
#define PCIE_LINK_STATUS_REG 0x80
#define PCIE_LINK_STATUS_SPEED_OFF 16
#define PCIE_LINK_STATUS_SPEED_MASK (0xf << PCIE_LINK_STATUS_SPEED_OFF)
#define PCIE_LINK_STATUS_WIDTH_OFF 20
#define PCIE_LINK_STATUS_WIDTH_MASK (0xf << PCIE_LINK_STATUS_WIDTH_OFF)

#define PCIE_LINK_CAPABILITY 0x7c
#define PCIE_LINK_CTL_2 0xa0
#define TARGET_LINK_SPEED_MASK 0xf
#define LINK_SPEED_GEN_1 0x1
#define LINK_SPEED_GEN_2 0x2
#define LINK_SPEED_GEN_3 0x3

#define PCIE_PORT_LINK_CONTROL 0x710
#define PORT_LINK_FAST_LINK_MODE BIT(7)
#define PCIE_MISC_CONTROL_1_OFF 0x8bc
#define PCIE_DBI_RO_WR_EN BIT(0)

#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80c
#define PORT_LOGIC_SPEED_CHANGE BIT(17)
#define PORT_LINK_MODE_MASK GENMASK(21, 16)
#define PORT_LINK_MODE(n) FIELD_PREP(PORT_LINK_MODE_MASK, n)
#define PORT_LINK_MODE_1_LANES PORT_LINK_MODE(0x1)
#define PORT_LINK_MODE_2_LANES PORT_LINK_MODE(0x3)
#define PORT_LINK_MODE_4_LANES PORT_LINK_MODE(0x7)
#define PORT_LINK_MODE_8_LANES PORT_LINK_MODE(0xf)
#define PORT_LOGIC_LINK_WIDTH_MASK GENMASK(12, 8)
#define PORT_LOGIC_LINK_WIDTH(n) FIELD_PREP(PORT_LOGIC_LINK_WIDTH_MASK, n)
#define PORT_LOGIC_LINK_WIDTH_1_LANES PORT_LOGIC_LINK_WIDTH(0x1)
#define PORT_LOGIC_LINK_WIDTH_2_LANES PORT_LOGIC_LINK_WIDTH(0x2)
#define PORT_LOGIC_LINK_WIDTH_4_LANES PORT_LOGIC_LINK_WIDTH(0x4)
#define PORT_LOGIC_LINK_WIDTH_8_LANES PORT_LOGIC_LINK_WIDTH(0x8)

/*
 * iATU Unroll-specific register definitions
 * From core version 4.80, address translation is done through the unrolled
 * registers. The registers are offset from atu_base.
 */
#define PCIE_ATU_UNR_REGION_CTRL1 0x00
#define PCIE_ATU_UNR_REGION_CTRL2 0x04
#define PCIE_ATU_UNR_LOWER_BASE 0x08
#define PCIE_ATU_UNR_UPPER_BASE 0x0c
#define PCIE_ATU_UNR_LIMIT 0x10
#define PCIE_ATU_UNR_LOWER_TARGET 0x14
#define PCIE_ATU_UNR_UPPER_TARGET 0x18

#define PCIE_ATU_REGION_INDEX2 (0x2 << 0)
#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
#define PCIE_ATU_TYPE_MEM (0x0 << 0)
#define PCIE_ATU_TYPE_IO (0x2 << 0)
#define PCIE_ATU_TYPE_CFG0 (0x4 << 0)
#define PCIE_ATU_TYPE_CFG1 (0x5 << 0)
#define PCIE_ATU_ENABLE (0x1 << 31)
#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30)
#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24)
#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19)
#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)

/* Register address builder */
#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
	((0x3 << 20) | ((region) << 9))
#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
	((0x3 << 20) | ((region) << 9) | (0x1 << 8))

/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_MAX_IATU_RETRIES 5
#define LINK_WAIT_IATU 10000

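/*
 * Offset of the DBI2 (shadow) view of the type-0 header; the BAR mask
 * registers written in rk_pcie_setup_host() to disable the RC BARs live
 * at this offset.
 */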
#define PCIE_TYPE0_HDR_DBI2_OFFSET 0x100000

static int rk_pcie_read(void __iomem *addr, int size, u32 *val)
{
	if ((uintptr_t)addr & (size - 1)) {
		*val = 0;
		return PCIBIOS_UNSUPPORTED;
	}

	if (size == 4) {
		*val = readl(addr);
	} else if (size == 2) {
		*val = readw(addr);
	} else if (size == 1) {
		*val = readb(addr);
	} else {
		*val = 0;
		return PCIBIOS_NODEV;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int rk_pcie_write(void __iomem *addr, int size, u32 val)
{
	if ((uintptr_t)addr & (size - 1))
		return PCIBIOS_UNSUPPORTED;

	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr);
	else if (size == 1)
		writeb(val, addr);
	else
		return PCIBIOS_NODEV;

	return PCIBIOS_SUCCESSFUL;
}

static u32 __rk_pcie_read_apb(struct rk_pcie *rk_pcie, void __iomem *base,
			      u32 reg, size_t size)
{
	int ret;
	u32 val;

	ret = rk_pcie_read(base + reg, size, &val);
	if (ret)
		dev_err(rk_pcie->dev, "Read APB address failed\n");

	return val;
}

static void __rk_pcie_write_apb(struct rk_pcie *rk_pcie, void __iomem *base,
				u32 reg, size_t size, u32 val)
{
	int ret;

	ret = rk_pcie_write(base + reg, size, val);
	if (ret)
		dev_err(rk_pcie->dev, "Write APB address failed\n");
}

static inline u32 rk_pcie_readl_apb(struct rk_pcie *rk_pcie, u32 reg)
{
	return __rk_pcie_read_apb(rk_pcie, rk_pcie->apb_base, reg, 0x4);
}

static inline void rk_pcie_writel_apb(struct rk_pcie *rk_pcie, u32 reg,
				      u32 val)
{
	__rk_pcie_write_apb(rk_pcie, rk_pcie->apb_base, reg, 0x4, val);
}

static int rk_pci_find_ext_capability(struct rk_pcie *rk_pcie, int cap)
{
	u32 header;
	int ttl;
	int start = 0;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	header = readl(rk_pcie->dbi_base + pos);

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		header = readl(rk_pcie->dbi_base + pos);
		if (!header)
			break;
	}

	return 0;
}

static int rk_pcie_get_link_speed(struct rk_pcie *rk_pcie)
{
	return (readl(rk_pcie->dbi_base + PCIE_LINK_STATUS_REG) &
		PCIE_LINK_STATUS_SPEED_MASK) >> PCIE_LINK_STATUS_SPEED_OFF;
}

static int rk_pcie_get_link_width(struct rk_pcie *rk_pcie)
{
	return (readl(rk_pcie->dbi_base + PCIE_LINK_STATUS_REG) &
		PCIE_LINK_STATUS_WIDTH_MASK) >> PCIE_LINK_STATUS_WIDTH_OFF;
}

static void rk_pcie_writel_ob_unroll(struct rk_pcie *rk_pcie, u32 index,
				     u32 reg, u32 val)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
	void __iomem *base = rk_pcie->dbi_base;

	writel(val, base + offset + reg);
}

static u32 rk_pcie_readl_ob_unroll(struct rk_pcie *rk_pcie, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
	void __iomem *base = rk_pcie->dbi_base;

	return readl(base + offset + reg);
}

static inline void rk_pcie_dbi_write_enable(struct rk_pcie *rk_pcie, bool en)
{
	u32 val;

	val = readl(rk_pcie->dbi_base + PCIE_MISC_CONTROL_1_OFF);

	if (en)
		val |= PCIE_DBI_RO_WR_EN;
	else
		val &= ~PCIE_DBI_RO_WR_EN;
	writel(val, rk_pcie->dbi_base + PCIE_MISC_CONTROL_1_OFF);
}

static void rk_pcie_setup_host(struct rk_pcie *rk_pcie)
{
	u32 val;

	rk_pcie_dbi_write_enable(rk_pcie, true);

	/* setup RC BARs */
	writel(PCI_BASE_ADDRESS_MEM_TYPE_64,
	       rk_pcie->dbi_base + PCI_BASE_ADDRESS_0);
	writel(0x0, rk_pcie->dbi_base + PCI_BASE_ADDRESS_1);

	/* setup interrupt pins */
	val = readl(rk_pcie->dbi_base + PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	writel(val, rk_pcie->dbi_base + PCI_INTERRUPT_LINE);

	/* setup bus numbers */
	val = readl(rk_pcie->dbi_base + PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	writel(val, rk_pcie->dbi_base + PCI_PRIMARY_BUS);

	val = readl(rk_pcie->dbi_base + PCI_PRIMARY_BUS);

	/* setup command register */
	val = readl(rk_pcie->dbi_base + PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
	       PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	writel(val, rk_pcie->dbi_base + PCI_COMMAND);

	/* program correct class for RC */
	writew(PCI_CLASS_BRIDGE_PCI, rk_pcie->dbi_base + PCI_CLASS_DEVICE);
	/* Better disable write permission right after the update */

	val = readl(rk_pcie->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	writel(val, rk_pcie->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);

	/* Disable BAR0 BAR1 */
	writel(0, rk_pcie->dbi_base + PCIE_TYPE0_HDR_DBI2_OFFSET + 0x10 + 0 * 4);
	writel(0, rk_pcie->dbi_base + PCIE_TYPE0_HDR_DBI2_OFFSET + 0x10 + 1 * 4);

	rk_pcie_dbi_write_enable(rk_pcie, false);
}

static void rk_pcie_configure(struct rk_pcie *pci, u32 cap_speed, u32 cap_lanes)
{
	u32 val;

	rk_pcie_dbi_write_enable(pci, true);

	val = readl(pci->dbi_base + PCIE_LINK_CAPABILITY);
	val &= ~TARGET_LINK_SPEED_MASK;
	val |= cap_speed;
	writel(val, pci->dbi_base + PCIE_LINK_CAPABILITY);

	val = readl(pci->dbi_base + PCIE_LINK_CTL_2);
	val &= ~TARGET_LINK_SPEED_MASK;
	val |= cap_speed;
	writel(val, pci->dbi_base + PCIE_LINK_CTL_2);

	val = readl(pci->dbi_base + PCIE_PORT_LINK_CONTROL);

	/* Set the number of lanes */
	val &= ~PORT_LINK_FAST_LINK_MODE;
	val &= ~PORT_LINK_MODE_MASK;
	switch (cap_lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(pci->dev, "cap_lanes %u: invalid value\n", cap_lanes);
		return;
	}
	writel(val, pci->dbi_base + PCIE_PORT_LINK_CONTROL);

	/* Set link width speed control register */
	val = readl(pci->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (cap_lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}
	writel(val, pci->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);

	rk_pcie_dbi_write_enable(pci, false);
}

static void rk_pcie_prog_outbound_atu_unroll(struct rk_pcie *pci, int index,
					     int type, u64 cpu_addr,
					     u64 pci_addr, u32 size)
{
	u32 retries, val;

	dev_dbg(pci->dev, "ATU programmed with: index: %d, type: %d, cpu addr: %8llx, pci addr: %8llx, size: %8x\n",
		index, type, cpu_addr, pci_addr, size);

	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
				 lower_32_bits(cpu_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
				 upper_32_bits(cpu_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
				 lower_32_bits(cpu_addr + size - 1));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(pci_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(pci_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
				 type);
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = rk_pcie_readl_ob_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return;

		udelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "outbound iATU is not being enabled\n");
}

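/*
 * Only device 0 is valid on the root bus and on the bus directly behind the
 * root port: this driver assumes a single root port with one endpoint and
 * no switch below it.
 */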
static int rk_pcie_addr_valid(pci_dev_t d, int first_busno)
{
	if ((PCI_BUS(d) == first_busno) && (PCI_DEV(d) > 0))
		return 0;
	if ((PCI_BUS(d) == first_busno + 1) && (PCI_DEV(d) > 0))
		return 0;

	return 1;
}

static uintptr_t set_cfg_address(struct rk_pcie *pcie,
				 pci_dev_t d, uint where)
{
	int bus = PCI_BUS(d) - pcie->first_busno;
	uintptr_t va_address;
	u32 atu_type;

	/* Use dbi_base for own configuration read and write */
	if (!bus) {
		va_address = (uintptr_t)pcie->dbi_base;
		goto out;
	}

	if (bus == 1)
		/*
		 * For the bus directly behind the root bridge, use CFG0
		 * accesses (TLP Type field 4).
		 */
		atu_type = PCIE_ATU_TYPE_CFG0;
	else
		/* Otherwise use CFG1 accesses (TLP Type field 5). */
		atu_type = PCIE_ATU_TYPE_CFG1;

	/*
	 * Not accessing root port configuration space, so (re)program
	 * outbound region #1 for CFG space access.
	 */
	d = PCI_MASK_BUS(d);
	d = PCI_ADD_BUS(bus, d);
	rk_pcie_prog_outbound_atu_unroll(pcie, PCIE_ATU_REGION_INDEX1,
					 atu_type, (u64)pcie->cfg_base,
					 d << 8, pcie->cfg_size);

	va_address = (uintptr_t)pcie->cfg_base;

out:
	va_address += where & ~0x3;

	return va_address;
}

static int rockchip_pcie_rd_conf(struct udevice *bus, pci_dev_t bdf,
				 uint offset, ulong *valuep,
				 enum pci_size_t size)
{
	struct rk_pcie *pcie = dev_get_priv(bus);
	uintptr_t va_address;
	ulong value;

	debug("PCIE CFG read: bdf=%2x:%2x:%2x\n",
	      PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf));

	if (!rk_pcie_addr_valid(bdf, pcie->first_busno)) {
		debug("- out of range\n");
		*valuep = pci_get_ff(size);
		return 0;
	}

	va_address = set_cfg_address(pcie, bdf, offset);

	value = readl(va_address);

	debug("(addr,val)=(0x%04x, 0x%08lx)\n", offset, value);
	*valuep = pci_conv_32_to_size(value, offset, size);

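	/*
	 * Outbound region #1 is shared between CFG and I/O accesses, so
	 * switch it back to I/O type now that the config read is done.
	 */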
	rk_pcie_prog_outbound_atu_unroll(pcie, PCIE_ATU_REGION_INDEX1,
					 PCIE_ATU_TYPE_IO, pcie->io.phys_start,
					 pcie->io.bus_start, pcie->io.size);

	return 0;
}

static int rockchip_pcie_wr_conf(struct udevice *bus, pci_dev_t bdf,
				 uint offset, ulong value,
				 enum pci_size_t size)
{
	struct rk_pcie *pcie = dev_get_priv(bus);
	uintptr_t va_address;
	ulong old;

	debug("PCIE CFG write: (b,d,f)=(%2d,%2d,%2d)\n",
	      PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf));
	debug("(addr,val)=(0x%04x, 0x%08lx)\n", offset, value);
	if (!rk_pcie_addr_valid(bdf, pcie->first_busno)) {
		debug("- out of range\n");
		return 0;
	}

	va_address = set_cfg_address(pcie, bdf, offset);

	old = readl(va_address);
	value = pci_conv_size_to_32(old, value, offset, size);
	writel(value, va_address);

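	/* Switch region #1 back to I/O type, as in rockchip_pcie_rd_conf() */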
	rk_pcie_prog_outbound_atu_unroll(pcie, PCIE_ATU_REGION_INDEX1,
					 PCIE_ATU_TYPE_IO, pcie->io.phys_start,
					 pcie->io.bus_start, pcie->io.size);

	return 0;
}

static void rk_pcie_enable_debug(struct rk_pcie *rk_pcie)
{
#if RK_PCIE_DBG
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_PTN_HIT_D0,
			   PCIE_CLIENT_DBG_TRANSITION_DATA);
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_PTN_HIT_D1,
			   PCIE_CLIENT_DBG_TRANSITION_DATA);
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_TRN_HIT_D0,
			   PCIE_CLIENT_DBG_TRANSITION_DATA);
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_TRN_HIT_D1,
			   PCIE_CLIENT_DBG_TRANSITION_DATA);
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_MODE_CON,
			   PCIE_CLIENT_DBF_EN);
#endif
}

static void rk_pcie_debug_dump(struct rk_pcie *rk_pcie)
{
#if RK_PCIE_DBG
	u32 loop;

	dev_err(rk_pcie->dev, "ltssm = 0x%x\n",
		rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
	for (loop = 0; loop < 64; loop++)
		dev_err(rk_pcie->dev, "fifo_status = 0x%x\n",
			rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_STATUS));
#endif
}

static inline void rk_pcie_link_status_clear(struct rk_pcie *rk_pcie)
{
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_GENERAL_DEBUG, 0x0);
}

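/*
 * Writes to APB register 0x0 below use the Rockchip "hiword mask" layout:
 * the upper 16 bits of the value select which of the lower 16 bits are
 * actually updated. Hence 0xc0008/0xc000c clear/set the LTSSM enable
 * control, and 0xf00040 in rockchip_pcie_init_port() selects Root Complex
 * mode the same way.
 */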
static inline void rk_pcie_disable_ltssm(struct rk_pcie *rk_pcie)
{
	rk_pcie_writel_apb(rk_pcie, 0x0, 0xc0008);
}

static inline void rk_pcie_enable_ltssm(struct rk_pcie *rk_pcie)
{
	rk_pcie_writel_apb(rk_pcie, 0x0, 0xc000c);
}

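/*
 * The link is considered up when both the PHY (SMLH) and data link (RDLH)
 * layers report link-up and the LTSSM state field reads 0x11 (L0).
 */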
static int is_link_up(struct rk_pcie *priv)
{
	u32 val;

	val = rk_pcie_readl_apb(priv, PCIE_CLIENT_LTSSM_STATUS);
	if ((val & (RDLH_LINKUP | SMLH_LINKUP)) == 0x30000 &&
	    (val & GENMASK(5, 0)) == 0x11)
		return 1;

	return 0;
}

static int rk_pcie_link_up(struct rk_pcie *priv, u32 cap_speed, u32 cap_lanes)
{
	int retries;

	if (is_link_up(priv)) {
		printf("PCI Link already up before configuration!\n");
		return 1;
	}

	/* DW pre link configurations */
	rk_pcie_configure(priv, cap_speed, cap_lanes);

	/* Release the device */
	if (dm_gpio_is_valid(&priv->rst_gpio)) {
		/*
		 * T_PVPERL (power stable to PERST# inactive) must be at
		 * least 100ms; use 200ms by default for extra margin.
		 */
		msleep(200);
		dm_gpio_set_value(&priv->rst_gpio, 1);
		/*
		 * A further 20ms delay is enough for the link to come up
		 * reliably and saves time when scanning devices afterwards.
		 */
		msleep(20);
	}

	rk_pcie_disable_ltssm(priv);
	rk_pcie_link_status_clear(priv);
	rk_pcie_enable_debug(priv);

	/* Enable LTSSM */
	rk_pcie_enable_ltssm(priv);

	for (retries = 0; retries < 50; retries++) {
		if (is_link_up(priv)) {
			dev_info(priv->dev, "PCIe Link up, LTSSM is 0x%x\n",
				 rk_pcie_readl_apb(priv, PCIE_CLIENT_LTSSM_STATUS));
			rk_pcie_debug_dump(priv);
			/*
			 * The link may still be in Gen-switch recovery,
			 * so wait another 1s before using it.
			 */
			msleep(1000);
			return 0;
		}

		dev_info(priv->dev, "PCIe Linking... LTSSM is 0x%x\n",
			 rk_pcie_readl_apb(priv, PCIE_CLIENT_LTSSM_STATUS));
		rk_pcie_debug_dump(priv);
		msleep(10);
	}

	dev_err(priv->dev, "PCIe-%d Link Fail\n", priv->dev->seq);
	rk_pcie_disable_ltssm(priv);
	if (dm_gpio_is_valid(&priv->rst_gpio))
		dm_gpio_set_value(&priv->rst_gpio, 0);

	return -EINVAL;
}

static int rockchip_pcie_init_port(struct udevice *dev)
{
	int ret, retries;
	u32 val;
	struct rk_pcie *priv = dev_get_priv(dev);
	union phy_configure_opts phy_cfg;

	/* Reset the device */
	if (dm_gpio_is_valid(&priv->rst_gpio))
		dm_gpio_set_value(&priv->rst_gpio, 0);

	/* Set power and maybe external ref clk input */
	if (priv->vpcie3v3) {
		ret = regulator_set_enable(priv->vpcie3v3, true);
		if (ret) {
			dev_err(priv->dev, "failed to enable vpcie3v3 (ret=%d)\n",
				ret);
			return ret;
		}
	}

	if (priv->is_bifurcation) {
		phy_cfg.pcie.is_bifurcation = true;
		ret = generic_phy_configure(&priv->phy, &phy_cfg);
		if (ret)
			dev_err(dev, "failed to set bifurcation for phy (ret=%d)\n", ret);
	}

	ret = generic_phy_init(&priv->phy);
	if (ret) {
		dev_err(dev, "failed to init phy (ret=%d)\n", ret);
		goto err_disable_3v3;
	}

	ret = generic_phy_power_on(&priv->phy);
	if (ret) {
		dev_err(dev, "failed to power on phy (ret=%d)\n", ret);
		goto err_exit_phy;
	}

	ret = reset_deassert_bulk(&priv->rsts);
	if (ret) {
		dev_err(dev, "failed to deassert resets (ret=%d)\n", ret);
		goto err_power_off_phy;
	}

	ret = clk_enable_bulk(&priv->clks);
	if (ret) {
		dev_err(dev, "failed to enable clks (ret=%d)\n", ret);
		goto err_deassert_bulk;
	}

	/* LTSSM EN ctrl mode */
	val = rk_pcie_readl_apb(priv, PCIE_CLIENT_HOT_RESET_CTRL);
	val |= PCIE_LTSSM_ENABLE_ENHANCE | (PCIE_LTSSM_ENABLE_ENHANCE << 16);
	rk_pcie_writel_apb(priv, PCIE_CLIENT_HOT_RESET_CTRL, val);

	/* Set RC mode */
	rk_pcie_writel_apb(priv, 0x0, 0xf00040);
	rk_pcie_setup_host(priv);

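	/*
	 * Try to bring the link up a few times, power-cycling the slot
	 * supply between attempts when one is available.
	 */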
	for (retries = MAX_LINKUP_RETRIES; retries > 0; retries--) {
		ret = rk_pcie_link_up(priv, priv->gen, priv->lanes);
		if (ret >= 0)
			return 0;
		if (priv->vpcie3v3) {
			regulator_set_enable(priv->vpcie3v3, false);
			msleep(200);
			regulator_set_enable(priv->vpcie3v3, true);
		}
	}

	if (retries <= 0)
		goto err_link_up;

	return 0;
err_link_up:
	clk_disable_bulk(&priv->clks);
err_deassert_bulk:
	reset_assert_bulk(&priv->rsts);
err_power_off_phy:
	if (!priv->is_bifurcation)
		generic_phy_power_off(&priv->phy);
err_exit_phy:
	if (!priv->is_bifurcation)
		generic_phy_exit(&priv->phy);
err_disable_3v3:
	if (priv->vpcie3v3 && !priv->is_bifurcation)
		regulator_set_enable(priv->vpcie3v3, false);
	return ret;
}

static int rockchip_pcie_parse_dt(struct udevice *dev)
{
	struct rk_pcie *priv = dev_get_priv(dev);
	u32 max_link_speed, num_lanes;
	int ret;
	struct resource res;

	ret = dev_read_resource_byname(dev, "pcie-dbi", &res);
	if (ret)
		return -ENODEV;
	priv->dbi_base = (void *)(res.start);
	dev_dbg(dev, "DBI address is 0x%p\n", priv->dbi_base);

	ret = dev_read_resource_byname(dev, "pcie-apb", &res);
	if (ret)
		return -ENODEV;
	priv->apb_base = (void *)(res.start);
	dev_dbg(dev, "APB address is 0x%p\n", priv->apb_base);

	ret = gpio_request_by_name(dev, "reset-gpios", 0,
				   &priv->rst_gpio, GPIOD_IS_OUT);
	if (ret) {
		dev_err(dev, "failed to find reset-gpios property\n");
		return ret;
	}

	ret = reset_get_bulk(dev, &priv->rsts);
	if (ret) {
		dev_err(dev, "Can't get reset: %d\n", ret);
		return ret;
	}

	ret = clk_get_bulk(dev, &priv->clks);
	if (ret) {
		dev_err(dev, "Can't get clock: %d\n", ret);
		return ret;
	}

	ret = device_get_supply_regulator(dev, "vpcie3v3-supply",
					  &priv->vpcie3v3);
	if (ret && ret != -ENOENT) {
		dev_err(dev, "failed to get vpcie3v3 supply (ret=%d)\n", ret);
		return ret;
	}

	ret = generic_phy_get_by_index(dev, 0, &priv->phy);
	if (ret) {
		dev_err(dev, "failed to get pcie phy (ret=%d)\n", ret);
		return ret;
	}

	if (dev_read_bool(dev, "rockchip,bifurcation"))
		priv->is_bifurcation = true;

	ret = ofnode_read_u32(dev->node, "max-link-speed", &max_link_speed);
	if (ret < 0 || max_link_speed > 4)
		priv->gen = 0;
	else
		priv->gen = max_link_speed;

	ret = ofnode_read_u32(dev->node, "num-lanes", &num_lanes);
	if (ret >= 0 && ilog2(num_lanes) >= 0 && ilog2(num_lanes) <= 3)
		priv->lanes = num_lanes;

	return 0;
}

static int rockchip_pcie_probe(struct udevice *dev)
{
	struct rk_pcie *priv = dev_get_priv(dev);
	struct udevice *ctlr = pci_get_controller(dev);
	struct pci_controller *hose = dev_get_uclass_priv(ctlr);
	int ret;

	priv->first_busno = dev->seq;
	priv->dev = dev;

	ret = rockchip_pcie_parse_dt(dev);
	if (ret)
		return ret;

	ret = rockchip_pcie_init_port(dev);
	if (ret)
		goto free_rst;

	dev_info(dev, "PCIE-%d: Link up (Gen%d-x%d, Bus%d)\n",
		 dev->seq, rk_pcie_get_link_speed(priv),
		 rk_pcie_get_link_width(priv),
		 hose->first_busno);

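	/*
	 * Record the host's I/O, MEM32 and MEM64 windows; they are used
	 * below to program the outbound iATU regions and to place the
	 * shared CFG window just below the I/O window.
	 */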
	for (ret = 0; ret < hose->region_count; ret++) {
		if (hose->regions[ret].flags == PCI_REGION_IO) {
			priv->io.phys_start = hose->regions[ret].phys_start; /* IO base */
			priv->io.bus_start = hose->regions[ret].bus_start; /* IO_bus_addr */
			priv->io.size = hose->regions[ret].size; /* IO size */
		} else if (hose->regions[ret].flags == PCI_REGION_MEM) {
			if (upper_32_bits(hose->regions[ret].bus_start)) { /* MEM64 base */
				priv->mem64.phys_start = hose->regions[ret].phys_start;
				priv->mem64.bus_start = hose->regions[ret].bus_start;
				priv->mem64.size = hose->regions[ret].size;
			} else { /* MEM32 base */
				priv->mem.phys_start = hose->regions[ret].phys_start;
				priv->mem.bus_start = hose->regions[ret].bus_start;
				priv->mem.size = hose->regions[ret].size;
			}
		} else if (hose->regions[ret].flags == PCI_REGION_SYS_MEMORY) {
			priv->cfg_base = (void *)(priv->io.phys_start - priv->io.size);
			priv->cfg_size = priv->io.size;
		} else if (hose->regions[ret].flags == PCI_REGION_PREFETCH) {
			dev_err(dev, "don't support prefetchable memory, please fix your dtb.\n");
		} else {
			dev_err(dev, "invalid flags type\n");
		}
	}

#ifdef CONFIG_SYS_PCI_64BIT
	dev_dbg(dev, "Config space: [0x%p - 0x%p, size 0x%llx]\n",
		priv->cfg_base, priv->cfg_base + priv->cfg_size,
		priv->cfg_size);

	dev_dbg(dev, "IO space: [0x%llx - 0x%llx, size 0x%llx]\n",
		priv->io.phys_start, priv->io.phys_start + priv->io.size,
		priv->io.size);

	dev_dbg(dev, "IO bus: [0x%llx - 0x%llx, size 0x%llx]\n",
		priv->io.bus_start, priv->io.bus_start + priv->io.size,
		priv->io.size);

	dev_dbg(dev, "MEM32 space: [0x%llx - 0x%llx, size 0x%llx]\n",
		priv->mem.phys_start, priv->mem.phys_start + priv->mem.size,
		priv->mem.size);

	dev_dbg(dev, "MEM32 bus: [0x%llx - 0x%llx, size 0x%llx]\n",
		priv->mem.bus_start, priv->mem.bus_start + priv->mem.size,
		priv->mem.size);

	dev_dbg(dev, "MEM64 space: [0x%llx - 0x%llx, size 0x%llx]\n",
		priv->mem64.phys_start, priv->mem64.phys_start + priv->mem64.size,
		priv->mem64.size);

	dev_dbg(dev, "MEM64 bus: [0x%llx - 0x%llx, size 0x%llx]\n",
		priv->mem64.bus_start, priv->mem64.bus_start + priv->mem64.size,
		priv->mem64.size);

	rk_pcie_prog_outbound_atu_unroll(priv, PCIE_ATU_REGION_INDEX2,
					 PCIE_ATU_TYPE_MEM,
					 priv->mem64.phys_start,
					 priv->mem64.bus_start, priv->mem64.size);
#else
	dev_dbg(dev, "Config space: [0x%p - 0x%p, size 0x%llx]\n",
		priv->cfg_base, priv->cfg_base + priv->cfg_size,
		priv->cfg_size);

	dev_dbg(dev, "IO space: [0x%llx - 0x%llx, size 0x%x]\n",
		priv->io.phys_start, priv->io.phys_start + priv->io.size,
		priv->io.size);

	dev_dbg(dev, "IO bus: [0x%x - 0x%x, size 0x%x]\n",
		priv->io.bus_start, priv->io.bus_start + priv->io.size,
		priv->io.size);

	dev_dbg(dev, "MEM32 space: [0x%llx - 0x%llx, size 0x%x]\n",
		priv->mem.phys_start, priv->mem.phys_start + priv->mem.size,
		priv->mem.size);

	dev_dbg(dev, "MEM32 bus: [0x%x - 0x%x, size 0x%x]\n",
		priv->mem.bus_start, priv->mem.bus_start + priv->mem.size,
		priv->mem.size);

#endif
	rk_pcie_prog_outbound_atu_unroll(priv, PCIE_ATU_REGION_INDEX0,
					 PCIE_ATU_TYPE_MEM,
					 priv->mem.phys_start,
					 priv->mem.bus_start, priv->mem.size);

	priv->rasdes_off = rk_pci_find_ext_capability(priv, PCI_EXT_CAP_ID_VNDR);
	if (priv->rasdes_off) {
		/* Enable RC's err dump */
		writel(0x1c, priv->dbi_base + priv->rasdes_off + 8);
		writel(0x3, priv->dbi_base + priv->rasdes_off + 8);
	}

	return 0;
free_rst:
	dm_gpio_free(dev, &priv->rst_gpio);
	return ret;
}

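/*
 * Select a RAS DES event in the vendor capability's event counter control
 * register (cap_base + 8) and print the corresponding counter value read
 * back from the event counter data register (cap_base + 0xc).
 */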
#define RAS_DES_EVENT(ss, v) \
	do { \
		writel(v, priv->dbi_base + cap_base + 8); \
		printf(ss "0x%x\n", readl(priv->dbi_base + cap_base + 0xc)); \
	} while (0)

static int rockchip_pcie_err_dump(struct udevice *bus)
{
	struct rk_pcie *priv = dev_get_priv(bus);
	u32 val = rk_pcie_readl_apb(priv, PCIE_CLIENT_CDM_RASDES_TBA_INFO_CMN);
	int cap_base;
	char *pm;

	if (val & BIT(6))
		pm = "In training";
	else if (val & BIT(5))
		pm = "L1.2";
	else if (val & BIT(4))
		pm = "L1.1";
	else if (val & BIT(3))
		pm = "L1";
	else if (val & BIT(2))
		pm = "L0";
	else if (val & 0x3)
		pm = (val == 0x3) ? "L0s" : (val & BIT(1) ? "RX L0s" : "TX L0s");
	else
		pm = "Invalid";

	printf("Common event signal status: %s\n", pm);

	cap_base = priv->rasdes_off;
	if (!priv->rasdes_off)
		return 0;

	RAS_DES_EVENT("EBUF Overflow: ", 0);
	RAS_DES_EVENT("EBUF Under-run: ", 0x0010000);
	RAS_DES_EVENT("Decode Error: ", 0x0020000);
	RAS_DES_EVENT("Running Disparity Error: ", 0x0030000);
	RAS_DES_EVENT("SKP OS Parity Error: ", 0x0040000);
	RAS_DES_EVENT("SYNC Header Error: ", 0x0050000);
	RAS_DES_EVENT("CTL SKP OS Parity Error: ", 0x0060000);
	RAS_DES_EVENT("Detect EI Infer: ", 0x1050000);
	RAS_DES_EVENT("Receiver Error: ", 0x1060000);
	RAS_DES_EVENT("Rx Recovery Request: ", 0x1070000);
	RAS_DES_EVENT("N_FTS Timeout: ", 0x1080000);
	RAS_DES_EVENT("Framing Error: ", 0x1090000);
	RAS_DES_EVENT("Deskew Error: ", 0x10a0000);
	RAS_DES_EVENT("BAD TLP: ", 0x2000000);
	RAS_DES_EVENT("LCRC Error: ", 0x2010000);
	RAS_DES_EVENT("BAD DLLP: ", 0x2020000);
	RAS_DES_EVENT("Replay Number Rollover: ", 0x2030000);
	RAS_DES_EVENT("Replay Timeout: ", 0x2040000);
	RAS_DES_EVENT("Rx Nak DLLP: ", 0x2050000);
	RAS_DES_EVENT("Tx Nak DLLP: ", 0x2060000);
	RAS_DES_EVENT("Retry TLP: ", 0x2070000);
	RAS_DES_EVENT("FC Timeout: ", 0x3000000);
	RAS_DES_EVENT("Poisoned TLP: ", 0x3010000);
	RAS_DES_EVENT("ECRC Error: ", 0x3020000);
	RAS_DES_EVENT("Unsupported Request: ", 0x3030000);
	RAS_DES_EVENT("Completer Abort: ", 0x3040000);
	RAS_DES_EVENT("Completion Timeout: ", 0x3050000);

	return 0;
}

static const struct dm_pci_ops rockchip_pcie_ops = {
	.read_config = rockchip_pcie_rd_conf,
	.write_config = rockchip_pcie_wr_conf,
	.vendor_aer_dump = rockchip_pcie_err_dump,
};

static const struct udevice_id rockchip_pcie_ids[] = {
	{ .compatible = "rockchip,rk3528-pcie" },
	{ .compatible = "rockchip,rk3562-pcie" },
	{ .compatible = "rockchip,rk3568-pcie" },
	{ .compatible = "rockchip,rk3588-pcie" },
	{ .compatible = "rockchip,rk3576-pcie" },
	{ }
};

U_BOOT_DRIVER(rockchip_pcie) = {
	.name = "pcie_dw_rockchip",
	.id = UCLASS_PCI,
	.of_match = rockchip_pcie_ids,
	.ops = &rockchip_pcie_ops,
	.probe = rockchip_pcie_probe,
	.priv_auto_alloc_size = sizeof(struct rk_pcie),
};