// SPDX-License-Identifier: GPL-2.0+
/*
 * Rockchip DesignWare based PCIe host controller driver
 *
 * Copyright (c) 2021 Rockchip, Inc.
 */

#include <common.h>
#include <clk.h>
#include <dm.h>
#include <generic-phy.h>
#include <pci.h>
#include <power-domain.h>
#include <power/regulator.h>
#include <reset.h>
#include <syscon.h>
#include <asm/io.h>
#include <asm-generic/gpio.h>
#include <asm/arch-rockchip/clock.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>

DECLARE_GLOBAL_DATA_PTR;

#define RK_PCIE_DBG		0

#define __pcie_dev_print_emit(fmt, ...) \
({ \
	printf(fmt, ##__VA_ARGS__); \
})

#ifdef dev_err
#undef dev_err
#define dev_err(dev, fmt, ...) \
({ \
	if (dev) \
		__pcie_dev_print_emit("%s: " fmt, dev->name, \
				      ##__VA_ARGS__); \
})
#endif

#ifdef dev_info
#undef dev_info
#define dev_info dev_err
#endif

#ifdef DEBUG
#define dev_dbg dev_err
#else
#define dev_dbg(dev, fmt, ...) \
({ \
	if (0) \
		__dev_printk(7, dev, fmt, ##__VA_ARGS__); \
})
#endif

struct rk_pcie {
	struct udevice	*dev;
	struct udevice	*vpcie3v3;
	void		*dbi_base;
	void		*apb_base;
	void		*cfg_base;
	fdt_size_t	cfg_size;
	struct phy	phy;
	struct clk_bulk	clks;
	int		first_busno;
	struct reset_ctl_bulk	rsts;
	struct gpio_desc	rst_gpio;
	struct pci_region	io;
	struct pci_region	mem;
	bool		is_bifurcation;
	u32		gen;
};

enum {
	PCIBIOS_SUCCESSFUL = 0x0000,
	PCIBIOS_UNSUPPORTED = -ENODEV,
	PCIBIOS_NODEV = -ENODEV,
};

#define msleep(a) udelay((a) * 1000)

/* PCIe client (APB) registers */
#define PCIE_CLIENT_GENERAL_DEBUG	0x104
#define PCIE_CLIENT_HOT_RESET_CTRL	0x180
#define PCIE_LTSSM_ENABLE_ENHANCE	BIT(4)
#define PCIE_CLIENT_LTSSM_STATUS	0x300
#define SMLH_LINKUP			BIT(16)
#define RDLH_LINKUP			BIT(17)
#define PCIE_CLIENT_DBG_FIFO_MODE_CON	0x310
#define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D0	0x320
#define PCIE_CLIENT_DBG_FIFO_PTN_HIT_D1	0x324
#define PCIE_CLIENT_DBG_FIFO_TRN_HIT_D0	0x328
#define PCIE_CLIENT_DBG_FIFO_TRN_HIT_D1	0x32c
#define PCIE_CLIENT_DBG_FIFO_STATUS	0x350
#define PCIE_CLIENT_DBG_TRANSITION_DATA	0xffff0000
#define PCIE_CLIENT_DBF_EN		0xffff0003

/* PCI DBICS registers */
#define PCIE_LINK_STATUS_REG		0x80
#define PCIE_LINK_STATUS_SPEED_OFF	16
#define PCIE_LINK_STATUS_SPEED_MASK	(0xf << PCIE_LINK_STATUS_SPEED_OFF)
#define PCIE_LINK_STATUS_WIDTH_OFF	20
#define PCIE_LINK_STATUS_WIDTH_MASK	(0xf << PCIE_LINK_STATUS_WIDTH_OFF)

#define PCIE_LINK_CAPABILITY		0x7c
#define PCIE_LINK_CTL_2			0xa0
#define TARGET_LINK_SPEED_MASK		0xf
#define LINK_SPEED_GEN_1		0x1
#define LINK_SPEED_GEN_2		0x2
#define LINK_SPEED_GEN_3		0x3

#define PCIE_MISC_CONTROL_1_OFF		0x8bc
#define PCIE_DBI_RO_WR_EN		BIT(0)

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80c
#define PORT_LOGIC_SPEED_CHANGE		BIT(17)

/*
 * iATU Unroll-specific register definitions
 * From core version 4.80, address translation is done through the unroll
 * registers. The registers are offsets from atu_base.
 */
#define PCIE_ATU_UNR_REGION_CTRL1	0x00
#define PCIE_ATU_UNR_REGION_CTRL2	0x04
#define PCIE_ATU_UNR_LOWER_BASE		0x08
#define PCIE_ATU_UNR_UPPER_BASE		0x0c
#define PCIE_ATU_UNR_LIMIT		0x10
#define PCIE_ATU_UNR_LOWER_TARGET	0x14
#define PCIE_ATU_UNR_UPPER_TARGET	0x18

#define PCIE_ATU_REGION_INDEX1		(0x1 << 0)
#define PCIE_ATU_REGION_INDEX0		(0x0 << 0)
#define PCIE_ATU_TYPE_MEM		(0x0 << 0)
#define PCIE_ATU_TYPE_IO		(0x2 << 0)
#define PCIE_ATU_TYPE_CFG0		(0x4 << 0)
#define PCIE_ATU_TYPE_CFG1		(0x5 << 0)
#define PCIE_ATU_ENABLE			(0x1 << 31)
#define PCIE_ATU_BAR_MODE_ENABLE	(0x1 << 30)
#define PCIE_ATU_BUS(x)			(((x) & 0xff) << 24)
#define PCIE_ATU_DEV(x)			(((x) & 0x1f) << 19)
#define PCIE_ATU_FUNC(x)		(((x) & 0x7) << 16)

/* Register address builder */
#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
		((0x3 << 20) | ((region) << 9))
#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
		((0x3 << 20) | ((region) << 9) | (0x1 << 8))
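/*
 * For reference, the builders above expand to atu_base-relative offsets of
 * 0x300000 + region * 0x200 for outbound windows (plus 0x100 for inbound),
 * e.g. outbound region 0 -> 0x300000 and outbound region 1 -> 0x300200.
 */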

/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_MAX_IATU_RETRIES	5
#define LINK_WAIT_IATU			10000

#define PCIE_TYPE0_HDR_DBI2_OFFSET	0x100000

static int rk_pcie_read(void __iomem *addr, int size, u32 *val)
{
	if ((uintptr_t)addr & (size - 1)) {
		*val = 0;
		return PCIBIOS_UNSUPPORTED;
	}

	if (size == 4) {
		*val = readl(addr);
	} else if (size == 2) {
		*val = readw(addr);
	} else if (size == 1) {
		*val = readb(addr);
	} else {
		*val = 0;
		return PCIBIOS_NODEV;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int rk_pcie_write(void __iomem *addr, int size, u32 val)
{
	if ((uintptr_t)addr & (size - 1))
		return PCIBIOS_UNSUPPORTED;

	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr);
	else if (size == 1)
		writeb(val, addr);
	else
		return PCIBIOS_NODEV;

	return PCIBIOS_SUCCESSFUL;
}

static u32 __rk_pcie_read_apb(struct rk_pcie *rk_pcie, void __iomem *base,
			      u32 reg, size_t size)
{
	int ret;
	u32 val;

	ret = rk_pcie_read(base + reg, size, &val);
	if (ret)
		dev_err(rk_pcie->dev, "Read APB address failed\n");

	return val;
}

static void __rk_pcie_write_apb(struct rk_pcie *rk_pcie, void __iomem *base,
				u32 reg, size_t size, u32 val)
{
	int ret;

	ret = rk_pcie_write(base + reg, size, val);
	if (ret)
		dev_err(rk_pcie->dev, "Write APB address failed\n");
}

static inline u32 rk_pcie_readl_apb(struct rk_pcie *rk_pcie, u32 reg)
{
	return __rk_pcie_read_apb(rk_pcie, rk_pcie->apb_base, reg, 0x4);
}

static inline void rk_pcie_writel_apb(struct rk_pcie *rk_pcie, u32 reg,
				      u32 val)
{
	__rk_pcie_write_apb(rk_pcie, rk_pcie->apb_base, reg, 0x4, val);
}

static int rk_pcie_get_link_speed(struct rk_pcie *rk_pcie)
{
	return (readl(rk_pcie->dbi_base + PCIE_LINK_STATUS_REG) &
		PCIE_LINK_STATUS_SPEED_MASK) >> PCIE_LINK_STATUS_SPEED_OFF;
}

static int rk_pcie_get_link_width(struct rk_pcie *rk_pcie)
{
	return (readl(rk_pcie->dbi_base + PCIE_LINK_STATUS_REG) &
		PCIE_LINK_STATUS_WIDTH_MASK) >> PCIE_LINK_STATUS_WIDTH_OFF;
}

static void rk_pcie_writel_ob_unroll(struct rk_pcie *rk_pcie, u32 index,
				     u32 reg, u32 val)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
	void __iomem *base = rk_pcie->dbi_base;

	writel(val, base + offset + reg);
}

static u32 rk_pcie_readl_ob_unroll(struct rk_pcie *rk_pcie, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
	void __iomem *base = rk_pcie->dbi_base;

	return readl(base + offset + reg);
}

static inline void rk_pcie_dbi_write_enable(struct rk_pcie *rk_pcie, bool en)
{
	u32 val;

	val = readl(rk_pcie->dbi_base + PCIE_MISC_CONTROL_1_OFF);

	if (en)
		val |= PCIE_DBI_RO_WR_EN;
	else
		val &= ~PCIE_DBI_RO_WR_EN;
	writel(val, rk_pcie->dbi_base + PCIE_MISC_CONTROL_1_OFF);
}

static void rk_pcie_setup_host(struct rk_pcie *rk_pcie)
{
	u32 val;

	rk_pcie_dbi_write_enable(rk_pcie, true);

	/* setup RC BARs */
	writel(PCI_BASE_ADDRESS_MEM_TYPE_64,
	       rk_pcie->dbi_base + PCI_BASE_ADDRESS_0);
	writel(0x0, rk_pcie->dbi_base + PCI_BASE_ADDRESS_1);

	/* setup interrupt pins */
	val = readl(rk_pcie->dbi_base + PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	writel(val, rk_pcie->dbi_base + PCI_INTERRUPT_LINE);

	/* setup bus numbers */
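	/*
	 * PCI_PRIMARY_BUS packs primary/secondary/subordinate bus numbers
	 * into bytes 0/1/2. The write below keeps the latency timer byte
	 * and sets primary = 0x00, secondary = 0x01, subordinate = 0xff.
	 */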
	val = readl(rk_pcie->dbi_base + PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	writel(val, rk_pcie->dbi_base + PCI_PRIMARY_BUS);

	val = readl(rk_pcie->dbi_base + PCI_PRIMARY_BUS);

	/* setup command register */
	val = readl(rk_pcie->dbi_base + PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
	       PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	writel(val, rk_pcie->dbi_base + PCI_COMMAND);

	/* program correct class for RC */
	writew(PCI_CLASS_BRIDGE_PCI, rk_pcie->dbi_base + PCI_CLASS_DEVICE);
	/* Better disable write permission right after the update */

	val = readl(rk_pcie->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	writel(val, rk_pcie->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);

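	/*
	 * Writing 0 to a BAR's shadow register in the DBI2 space (DBI +
	 * PCIE_TYPE0_HDR_DBI2_OFFSET) clears its mask, which the DWC core
	 * treats as "BAR disabled".
	 */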
	/* Disable BAR0 BAR1 */
	writel(0, rk_pcie->dbi_base + PCIE_TYPE0_HDR_DBI2_OFFSET + 0x10 + 0 * 4);
	writel(0, rk_pcie->dbi_base + PCIE_TYPE0_HDR_DBI2_OFFSET + 0x10 + 1 * 4);

	rk_pcie_dbi_write_enable(rk_pcie, false);
}

static void rk_pcie_configure(struct rk_pcie *pci, u32 cap_speed)
{
	u32 val;

	rk_pcie_dbi_write_enable(pci, true);

	val = readl(pci->dbi_base + PCIE_LINK_CAPABILITY);
	val &= ~TARGET_LINK_SPEED_MASK;
	val |= cap_speed;
	writel(val, pci->dbi_base + PCIE_LINK_CAPABILITY);

	val = readl(pci->dbi_base + PCIE_LINK_CTL_2);
	val &= ~TARGET_LINK_SPEED_MASK;
	val |= cap_speed;
	writel(val, pci->dbi_base + PCIE_LINK_CTL_2);

	rk_pcie_dbi_write_enable(pci, false);
}

static void rk_pcie_prog_outbound_atu_unroll(struct rk_pcie *pci, int index,
					     int type, u64 cpu_addr,
					     u64 pci_addr, u32 size)
{
	u32 retries, val;

	dev_dbg(pci->dev, "ATU programmed with: index: %d, type: %d, cpu addr: %8llx, pci addr: %8llx, size: %8x\n",
		index, type, cpu_addr, pci_addr, size);

	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
				 lower_32_bits(cpu_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
				 upper_32_bits(cpu_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
				 lower_32_bits(cpu_addr + size - 1));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
				 lower_32_bits(pci_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
				 upper_32_bits(pci_addr));
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
				 type);
	rk_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
				 PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = rk_pcie_readl_ob_unroll(pci, index,
					      PCIE_ATU_UNR_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return;

		udelay(LINK_WAIT_IATU);
	}
	dev_err(pci->dev, "outbound iATU is not being enabled\n");
}

static int rk_pcie_addr_valid(pci_dev_t d, int first_busno)
{
	if ((PCI_BUS(d) == first_busno) && (PCI_DEV(d) > 0))
		return 0;
	if ((PCI_BUS(d) == first_busno + 1) && (PCI_DEV(d) > 0))
		return 0;

	return 1;
}

static uintptr_t set_cfg_address(struct rk_pcie *pcie,
				 pci_dev_t d, uint where)
{
	int bus = PCI_BUS(d) - pcie->first_busno;
	uintptr_t va_address;
	u32 atu_type;

	/* Use dbi_base for own configuration read and write */
	if (!bus) {
		va_address = (uintptr_t)pcie->dbi_base;
		goto out;
	}

	if (bus == 1)
		/*
		 * For local bus whose primary bus number is root bridge,
		 * change TLP Type field to 4.
		 */
		atu_type = PCIE_ATU_TYPE_CFG0;
	else
		/* Otherwise, change TLP Type field to 5. */
		atu_type = PCIE_ATU_TYPE_CFG1;

	/*
	 * Not accessing root port configuration space?
	 * Outbound region #1 is used for CFG space access; region #0
	 * carries the memory window programmed at probe time.
	 */
	d = PCI_MASK_BUS(d);
	d = PCI_ADD_BUS(bus, d);
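	/*
	 * U-Boot encodes a BDF as bus << 16 | dev << 11 | func << 8, so
	 * shifting it left by 8 yields the bus << 24 | dev << 19 | func << 16
	 * layout expected in the CFG TLP target address (see PCIE_ATU_BUS/
	 * DEV/FUNC above).
	 */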
	rk_pcie_prog_outbound_atu_unroll(pcie, PCIE_ATU_REGION_INDEX1,
					 atu_type, (u64)pcie->cfg_base,
					 d << 8, pcie->cfg_size);

	va_address = (uintptr_t)pcie->cfg_base;

out:
	va_address += where & ~0x3;

	return va_address;
}

static int rockchip_pcie_rd_conf(struct udevice *bus, pci_dev_t bdf,
				 uint offset, ulong *valuep,
				 enum pci_size_t size)
{
	struct rk_pcie *pcie = dev_get_priv(bus);
	uintptr_t va_address;
	ulong value;

	debug("PCIE CFG read: bdf=%2x:%2x:%2x\n",
	      PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf));

	if (!rk_pcie_addr_valid(bdf, pcie->first_busno)) {
		debug("- out of range\n");
		*valuep = pci_get_ff(size);
		return 0;
	}

	va_address = set_cfg_address(pcie, bdf, offset);

	value = readl(va_address);

	debug("(addr,val)=(0x%04x, 0x%08lx)\n", offset, value);
	*valuep = pci_conv_32_to_size(value, offset, size);

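	/*
	 * Region #1 was just retargeted at CFG space by set_cfg_address();
	 * point it back at the I/O window so later I/O accesses still hit
	 * the right translation.
	 */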
	rk_pcie_prog_outbound_atu_unroll(pcie, PCIE_ATU_REGION_INDEX1,
					 PCIE_ATU_TYPE_IO, pcie->io.phys_start,
					 pcie->io.bus_start, pcie->io.size);

	return 0;
}

static int rockchip_pcie_wr_conf(struct udevice *bus, pci_dev_t bdf,
				 uint offset, ulong value,
				 enum pci_size_t size)
{
	struct rk_pcie *pcie = dev_get_priv(bus);
	uintptr_t va_address;
	ulong old;

	debug("PCIE CFG write: (b,d,f)=(%2d,%2d,%2d)\n",
	      PCI_BUS(bdf), PCI_DEV(bdf), PCI_FUNC(bdf));
	debug("(addr,val)=(0x%04x, 0x%08lx)\n", offset, value);

	if (!rk_pcie_addr_valid(bdf, pcie->first_busno)) {
		debug("- out of range\n");
		return 0;
	}

	va_address = set_cfg_address(pcie, bdf, offset);

	old = readl(va_address);
	value = pci_conv_size_to_32(old, value, offset, size);
	writel(value, va_address);

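	/* As in the read path: restore region #1 to the I/O translation. */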
	rk_pcie_prog_outbound_atu_unroll(pcie, PCIE_ATU_REGION_INDEX1,
					 PCIE_ATU_TYPE_IO, pcie->io.phys_start,
					 pcie->io.bus_start, pcie->io.size);

	return 0;
}

static void rk_pcie_enable_debug(struct rk_pcie *rk_pcie)
{
#if RK_PCIE_DBG
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_PTN_HIT_D0,
			   PCIE_CLIENT_DBG_TRANSITION_DATA);
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_PTN_HIT_D1,
			   PCIE_CLIENT_DBG_TRANSITION_DATA);
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_TRN_HIT_D0,
			   PCIE_CLIENT_DBG_TRANSITION_DATA);
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_TRN_HIT_D1,
			   PCIE_CLIENT_DBG_TRANSITION_DATA);
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_MODE_CON,
			   PCIE_CLIENT_DBF_EN);
#endif
}

static void rk_pcie_debug_dump(struct rk_pcie *rk_pcie)
{
#if RK_PCIE_DBG
	u32 loop;

	dev_err(rk_pcie->dev, "ltssm = 0x%x\n",
		rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_LTSSM_STATUS));
	for (loop = 0; loop < 64; loop++)
		dev_err(rk_pcie->dev, "fifo_status = 0x%x\n",
			rk_pcie_readl_apb(rk_pcie, PCIE_CLIENT_DBG_FIFO_STATUS));
#endif
}

static inline void rk_pcie_link_status_clear(struct rk_pcie *rk_pcie)
{
	rk_pcie_writel_apb(rk_pcie, PCIE_CLIENT_GENERAL_DEBUG, 0x0);
}

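/*
 * The two helpers below write the PCIe client general control register at
 * APB offset 0x0. The magic values appear to follow Rockchip's usual
 * hiword-mask convention: the upper 16 bits select which low bits get
 * written, so 0xc000c sets bits 3:2 and 0xc0008 clears bit 2 while keeping
 * bit 3 set.
 */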
static inline void rk_pcie_disable_ltssm(struct rk_pcie *rk_pcie)
{
	rk_pcie_writel_apb(rk_pcie, 0x0, 0xc0008);
}

static inline void rk_pcie_enable_ltssm(struct rk_pcie *rk_pcie)
{
	rk_pcie_writel_apb(rk_pcie, 0x0, 0xc000c);
}

static int is_link_up(struct rk_pcie *priv)
{
	u32 val;

	val = rk_pcie_readl_apb(priv, PCIE_CLIENT_LTSSM_STATUS);
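	/*
	 * Require both the PHY (SMLH) and data link layer (RDLH) link-up
	 * flags, and an LTSSM state field (bits 5:0) of 0x11, which is the
	 * L0 state on DesignWare controllers.
	 */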
	if ((val & (RDLH_LINKUP | SMLH_LINKUP)) == (RDLH_LINKUP | SMLH_LINKUP) &&
	    (val & GENMASK(5, 0)) == 0x11)
		return 1;

	return 0;
}

static int rk_pcie_link_up(struct rk_pcie *priv, u32 cap_speed)
{
	int retries;

	if (is_link_up(priv)) {
		printf("PCI Link already up before configuration!\n");
		return 1;
	}

	/* DW pre link configurations */
	rk_pcie_configure(priv, cap_speed);

	/* Release the device */
	if (dm_gpio_is_valid(&priv->rst_gpio)) {
		/*
		 * T_PVPERL (power stable to PERST# inactive) must be at
		 * least 100ms; use 200ms by default for extra margin.
		 */
		msleep(200);
		dm_gpio_set_value(&priv->rst_gpio, 1);
		/*
		 * Wait a further 20ms: the link is observed to be reliably
		 * up after this delay, which shortens the polling loop
		 * below when scanning devices.
		 */
		msleep(20);
	}

	rk_pcie_disable_ltssm(priv);
	rk_pcie_link_status_clear(priv);
	rk_pcie_enable_debug(priv);

	/* Enable LTSSM */
	rk_pcie_enable_ltssm(priv);

	for (retries = 0; retries < 50; retries++) {
		if (is_link_up(priv)) {
			dev_info(priv->dev, "PCIe Link up, LTSSM is 0x%x\n",
				 rk_pcie_readl_apb(priv, PCIE_CLIENT_LTSSM_STATUS));
			rk_pcie_debug_dump(priv);
			/*
			 * The link may still be in a speed-change (Gen
			 * switch) recovery, so wait another 1s.
			 */
			msleep(1000);
			return 0;
		}

		dev_info(priv->dev, "PCIe Linking... LTSSM is 0x%x\n",
			 rk_pcie_readl_apb(priv, PCIE_CLIENT_LTSSM_STATUS));
		rk_pcie_debug_dump(priv);
		msleep(10);
	}

	dev_err(priv->dev, "PCIe-%d Link Fail\n", priv->dev->seq);
	return -EINVAL;
}

static int rockchip_pcie_init_port(struct udevice *dev)
{
	int ret;
	u32 val;
	struct rk_pcie *priv = dev_get_priv(dev);
	union phy_configure_opts phy_cfg;

	/* Reset the device (assert PERST#) */
	if (dm_gpio_is_valid(&priv->rst_gpio))
		dm_gpio_set_value(&priv->rst_gpio, 0);

	/* Set power and maybe external ref clk input */
	if (priv->vpcie3v3) {
		ret = regulator_set_enable(priv->vpcie3v3, true);
		if (ret) {
			dev_err(priv->dev, "failed to enable vpcie3v3 (ret=%d)\n",
				ret);
			return ret;
		}
	}

	if (priv->is_bifurcation) {
		phy_cfg.pcie.is_bifurcation = true;
		ret = generic_phy_configure(&priv->phy, &phy_cfg);
		if (ret)
			dev_err(dev, "failed to set bifurcation for phy (ret=%d)\n", ret);
	}

	ret = generic_phy_init(&priv->phy);
	if (ret) {
		dev_err(dev, "failed to init phy (ret=%d)\n", ret);
		return ret;
	}

	ret = generic_phy_power_on(&priv->phy);
	if (ret) {
		dev_err(dev, "failed to power on phy (ret=%d)\n", ret);
		goto err_exit_phy;
	}

	ret = reset_deassert_bulk(&priv->rsts);
	if (ret) {
		dev_err(dev, "failed to deassert resets (ret=%d)\n", ret);
		goto err_power_off_phy;
	}

	ret = clk_enable_bulk(&priv->clks);
	if (ret) {
		dev_err(dev, "failed to enable clks (ret=%d)\n", ret);
		goto err_deassert_bulk;
	}

	/* LTSSM EN ctrl mode */
	val = rk_pcie_readl_apb(priv, PCIE_CLIENT_HOT_RESET_CTRL);
	val |= PCIE_LTSSM_ENABLE_ENHANCE | (PCIE_LTSSM_ENABLE_ENHANCE << 16);
	rk_pcie_writel_apb(priv, PCIE_CLIENT_HOT_RESET_CTRL, val);

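	/*
	 * Select RC mode via the client general control register at APB
	 * offset 0x0. This looks like another hiword-masked write: mask
	 * 0xf0 with value 0x40 programs the device/port type field to
	 * Root Complex.
	 */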
	/* Set RC mode */
	rk_pcie_writel_apb(priv, 0x0, 0xf00040);
	rk_pcie_setup_host(priv);

	ret = rk_pcie_link_up(priv, priv->gen);
	if (ret < 0)
		goto err_link_up;

	return 0;
err_link_up:
	clk_disable_bulk(&priv->clks);
err_deassert_bulk:
	reset_assert_bulk(&priv->rsts);
err_power_off_phy:
	generic_phy_power_off(&priv->phy);
err_exit_phy:
	generic_phy_exit(&priv->phy);
	return ret;
}

static int rockchip_pcie_parse_dt(struct udevice *dev)
{
	struct rk_pcie *priv = dev_get_priv(dev);
	u32 max_link_speed;
	int ret;
	struct resource res;

	ret = dev_read_resource_byname(dev, "pcie-dbi", &res);
	if (ret)
		return -ENODEV;
	priv->dbi_base = (void *)(res.start);
	dev_dbg(dev, "DBI address is 0x%p\n", priv->dbi_base);

	ret = dev_read_resource_byname(dev, "pcie-apb", &res);
	if (ret)
		return -ENODEV;
	priv->apb_base = (void *)(res.start);
	dev_dbg(dev, "APB address is 0x%p\n", priv->apb_base);

	ret = gpio_request_by_name(dev, "reset-gpios", 0,
				   &priv->rst_gpio, GPIOD_IS_OUT);
	if (ret) {
		dev_err(dev, "failed to find reset-gpios property\n");
		return ret;
	}

	ret = reset_get_bulk(dev, &priv->rsts);
	if (ret) {
		dev_err(dev, "Can't get reset: %d\n", ret);
		return ret;
	}

	ret = clk_get_bulk(dev, &priv->clks);
	if (ret) {
		dev_err(dev, "Can't get clock: %d\n", ret);
		return ret;
	}

	ret = device_get_supply_regulator(dev, "vpcie3v3-supply",
					  &priv->vpcie3v3);
	if (ret && ret != -ENOENT) {
		dev_err(dev, "failed to get vpcie3v3 supply (ret=%d)\n", ret);
		return ret;
	}

	ret = generic_phy_get_by_index(dev, 0, &priv->phy);
	if (ret) {
		dev_err(dev, "failed to get pcie phy (ret=%d)\n", ret);
		return ret;
	}

	if (dev_read_bool(dev, "rockchip,bifurcation"))
		priv->is_bifurcation = true;

	ret = ofnode_read_u32(dev->node, "max-link-speed", &max_link_speed);
	if (ret < 0 || max_link_speed > 4)
		priv->gen = 0;
	else
		priv->gen = max_link_speed;

	return 0;
}

static int rockchip_pcie_probe(struct udevice *dev)
{
	struct rk_pcie *priv = dev_get_priv(dev);
	struct udevice *ctlr = pci_get_controller(dev);
	struct pci_controller *hose = dev_get_uclass_priv(ctlr);
	int ret;

	priv->first_busno = dev->seq;
	priv->dev = dev;

	ret = rockchip_pcie_parse_dt(dev);
	if (ret)
		return ret;

	ret = rockchip_pcie_init_port(dev);
	if (ret)
		return ret;

	dev_info(dev, "PCIE-%d: Link up (Gen%d-x%d, Bus%d)\n",
		 dev->seq, rk_pcie_get_link_speed(priv),
		 rk_pcie_get_link_width(priv),
		 hose->first_busno);

	for (ret = 0; ret < hose->region_count; ret++) {
		if (hose->regions[ret].flags == PCI_REGION_IO) {
			priv->io.phys_start = hose->regions[ret].phys_start; /* IO base */
			priv->io.bus_start = hose->regions[ret].bus_start; /* IO_bus_addr */
			priv->io.size = hose->regions[ret].size; /* IO size */
		} else if (hose->regions[ret].flags == PCI_REGION_MEM) {
			priv->mem.phys_start = hose->regions[ret].phys_start; /* MEM base */
			priv->mem.bus_start = hose->regions[ret].bus_start; /* MEM_bus_addr */
			priv->mem.size = hose->regions[ret].size; /* MEM size */
		} else if (hose->regions[ret].flags == PCI_REGION_SYS_MEMORY) {
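			/*
			 * Carve the CFG window out of the address space just
			 * below the I/O window, reusing the I/O window size.
			 * This relies on the I/O region appearing before the
			 * system-memory region in the controller's ranges.
			 */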
			priv->cfg_base = (void *)(priv->io.phys_start - priv->io.size);
			priv->cfg_size = priv->io.size;
		} else {
			dev_err(dev, "invalid flags type!\n");
		}
	}

	dev_dbg(dev, "Config space: [0x%p - 0x%p, size 0x%llx]\n",
		priv->cfg_base, priv->cfg_base + priv->cfg_size,
		priv->cfg_size);

	dev_dbg(dev, "IO space: [0x%llx - 0x%llx, size 0x%x]\n",
		priv->io.phys_start, priv->io.phys_start + priv->io.size,
		priv->io.size);

	dev_dbg(dev, "IO bus: [0x%x - 0x%x, size 0x%x]\n",
		priv->io.bus_start, priv->io.bus_start + priv->io.size,
		priv->io.size);

	dev_dbg(dev, "MEM space: [0x%llx - 0x%llx, size 0x%x]\n",
		priv->mem.phys_start, priv->mem.phys_start + priv->mem.size,
		priv->mem.size);

	dev_dbg(dev, "MEM bus: [0x%x - 0x%x, size 0x%x]\n",
		priv->mem.bus_start, priv->mem.bus_start + priv->mem.size,
		priv->mem.size);

	rk_pcie_prog_outbound_atu_unroll(priv, PCIE_ATU_REGION_INDEX0,
					 PCIE_ATU_TYPE_MEM,
					 priv->mem.phys_start,
					 priv->mem.bus_start, priv->mem.size);
	return 0;
}

static const struct dm_pci_ops rockchip_pcie_ops = {
	.read_config	= rockchip_pcie_rd_conf,
	.write_config	= rockchip_pcie_wr_conf,
};

static const struct udevice_id rockchip_pcie_ids[] = {
	{ .compatible = "rockchip,rk3528-pcie" },
	{ .compatible = "rockchip,rk3562-pcie" },
	{ .compatible = "rockchip,rk3568-pcie" },
	{ .compatible = "rockchip,rk3588-pcie" },
	{ }
};

U_BOOT_DRIVER(rockchip_pcie) = {
	.name			= "pcie_dw_rockchip",
	.id			= UCLASS_PCI,
	.of_match		= rockchip_pcie_ids,
	.ops			= &rockchip_pcie_ops,
	.probe			= rockchip_pcie_probe,
	.priv_auto_alloc_size	= sizeof(struct rk_pcie),
};