// SPDX-License-Identifier: GPL-2.0+
/*
 * Rockchip AXI PCIe host controller driver
 *
 * Copyright (c) 2016 Rockchip, Inc.
 *
 * Author: Shawn Lin <shawn.lin@rock-chips.com>
 *         Wenrui Li <wenrui.li@rock-chips.com>
 *
 * Bits taken from Synopsys DesignWare Host controller driver and
 * ARM PCI Host generic driver.
 */

#include <linux/bitrev.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>

#include "../pci.h"
#include "pcie-rockchip.h"
#include "rockchip-pcie-dma.h"

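/*
 * Kick off a uDMA transfer on the RK3399: program the channel's descriptor
 * list address (low/high 32 bits) into the uDMA APB registers, then set the
 * enable bit together with the transfer direction.
 */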
static void rk_pcie_start_dma_rk3399(struct dma_trx_obj *obj, struct dma_table *cur)
{
	struct rockchip_pcie *rockchip = dev_get_drvdata(obj->dev);
	struct dma_table *tbl = cur;
	int chn = tbl->chn;

	rockchip_pcie_write(rockchip, (u32)(tbl->phys_descs & 0xffffffff),
			    PCIE_APB_CORE_UDMA_BASE + 0x14 * chn + 0x04);
	rockchip_pcie_write(rockchip, (u32)(tbl->phys_descs >> 32),
			    PCIE_APB_CORE_UDMA_BASE + 0x14 * chn + 0x08);
	rockchip_pcie_write(rockchip, BIT(0) | (tbl->dir << 1),
			    PCIE_APB_CORE_UDMA_BASE + 0x14 * chn + 0x00);
}

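/*
 * Fill in the 9-word uDMA descriptor for a single transfer: local (CPU side)
 * and bus (PCIe side) addresses, the transfer length, and a control bit
 * (bit 24) that the vendor DMA code sets on the size word.
 */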
static void rk_pcie_config_dma_rk3399(struct dma_table *table)
{
	u32 *desc = table->descs;

	*(desc + 0) = (u32)(table->local & 0xffffffff);
	*(desc + 1) = (u32)(table->local >> 32);
	*(desc + 2) = (u32)(table->bus & 0xffffffff);
	*(desc + 3) = (u32)(table->bus >> 32);
	*(desc + 4) = 0;
	*(desc + 5) = 0;
	*(desc + 6) = table->buf_size;
	*(desc + 7) = 0;
	*(desc + 8) = 0;
	*(desc + 6) |= 1 << 24;
}

static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
{
	u32 status;

	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
	status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
}

static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
{
	u32 status;

	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
	status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
}

static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
{
	u32 val;

	/* Update Tx credit maximum update interval */
	val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1);
	val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK;
	val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000);	/* ns */
	rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1);
}

static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
				      struct pci_bus *bus, int dev)
{
	/*
	 * Access only one slot on each root port.
	 * Do not read more than one device on the bus directly attached
	 * to RC's downstream side.
	 */
	if (pci_is_root_bus(bus) || pci_is_root_bus(bus->parent))
		return dev == 0;

	return 1;
}

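/*
 * Return a bitmap of the lanes that finished training. Legacy PHYs do not
 * report a lane map, so all lanes are assumed active; otherwise read the
 * core's lane map and undo lane reversal if the link trained reversed.
 */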
static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip)
{
	u32 val;
	u8 map;

	if (rockchip->legacy_phy)
		return GENMASK(MAX_LANE_NUM - 1, 0);

	val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP);
	map = val & PCIE_CORE_LANE_MAP_MASK;

	/* The link may be using a reverse-indexed mapping. */
	if (val & PCIE_CORE_LANE_MAP_REVERSE)
		map = bitrev8(map) >> 4;

	return map;
}

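/*
 * Read the Root Complex's own configuration space through the APB mapping
 * of the RC config registers. Misaligned or unsupported access sizes are
 * rejected with PCIBIOS_BAD_REGISTER_NUMBER.
 */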
static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
				     int where, int size, u32 *val)
{
	void __iomem *addr;

	addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;

	if (!IS_ALIGNED((uintptr_t)addr, size)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	if (size == 4) {
		*val = readl(addr);
	} else if (size == 2) {
		*val = readw(addr);
	} else if (size == 1) {
		*val = readb(addr);
	} else {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}
	return PCIBIOS_SUCCESSFUL;
}

static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
				     int where, int size, u32 val)
{
	u32 mask, tmp, offset;
	void __iomem *addr;

	offset = where & ~0x3;
	addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset;

	if (size == 4) {
		writel(val, addr);
		return PCIBIOS_SUCCESSFUL;
	}

	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));

	/*
	 * N.B. This read/modify/write isn't safe in general because it can
	 * corrupt RW1C bits in adjacent registers.  But the hardware
	 * doesn't support smaller writes.
	 */
	tmp = readl(addr) & mask;
	tmp |= val << ((where & 0x3) * 8);
	writel(tmp, addr);

	return PCIBIOS_SUCCESSFUL;
}

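/*
 * Read configuration space of a downstream device through the AXI config
 * window. Type 0 accesses are used for the bus directly below the Root
 * Port, Type 1 for anything further downstream.
 */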
static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
				       struct pci_bus *bus, u32 devfn,
				       int where, int size, u32 *val)
{
	u32 busdev;

	if (rockchip->in_remove)
		return PCIBIOS_SUCCESSFUL;

	busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
				PCI_FUNC(devfn), where);

	if (!IS_ALIGNED(busdev, size)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	if (pci_is_root_bus(bus->parent))
		rockchip_pcie_cfg_configuration_accesses(rockchip,
						AXI_WRAPPER_TYPE0_CFG);
	else
		rockchip_pcie_cfg_configuration_accesses(rockchip,
						AXI_WRAPPER_TYPE1_CFG);

	if (size == 4) {
		*val = readl(rockchip->reg_base + busdev);
	} else if (size == 2) {
		*val = readw(rockchip->reg_base + busdev);
	} else if (size == 1) {
		*val = readb(rockchip->reg_base + busdev);
	} else {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}
	return PCIBIOS_SUCCESSFUL;
}

static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
				       struct pci_bus *bus, u32 devfn,
				       int where, int size, u32 val)
{
	u32 busdev;

	if (rockchip->in_remove)
		return PCIBIOS_SUCCESSFUL;

	busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn),
				PCI_FUNC(devfn), where);
	if (!IS_ALIGNED(busdev, size))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (pci_is_root_bus(bus->parent))
		rockchip_pcie_cfg_configuration_accesses(rockchip,
						AXI_WRAPPER_TYPE0_CFG);
	else
		rockchip_pcie_cfg_configuration_accesses(rockchip,
						AXI_WRAPPER_TYPE1_CFG);

	if (size == 4)
		writel(val, rockchip->reg_base + busdev);
	else if (size == 2)
		writew(val, rockchip->reg_base + busdev);
	else if (size == 1)
		writeb(val, rockchip->reg_base + busdev);
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}

static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
				 int size, u32 *val)
{
	struct rockchip_pcie *rockchip = bus->sysdata;

	if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (pci_is_root_bus(bus))
		return rockchip_pcie_rd_own_conf(rockchip, where, size, val);

	return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size,
					   val);
}

static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
				 int where, int size, u32 val)
{
	struct rockchip_pcie *rockchip = bus->sysdata;

	if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (pci_is_root_bus(bus))
		return rockchip_pcie_wr_own_conf(rockchip, where, size, val);

	return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size,
					   val);
}

static struct pci_ops rockchip_pcie_ops = {
	.read = rockchip_pcie_rd_conf,
	.write = rockchip_pcie_wr_conf,
};

static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
{
	int curr;
	u32 status, scale, power;

	if (IS_ERR(rockchip->vpcie3v3))
		return;

	/*
	 * Set RC's captured slot power limit and scale if
	 * vpcie3v3 available.  The default values are both zero
	 * which means the software should set these two according
	 * to the actual power supply.
	 */
	curr = regulator_get_current_limit(rockchip->vpcie3v3);
	if (curr <= 0)
		return;

	scale = 3; /* 0.001x */
	curr = curr / 1000; /* convert to mA */
	power = (curr * 3300) / 1000; /* milliwatt */
	while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
		if (!scale) {
			dev_warn(rockchip->dev, "invalid power supply\n");
			return;
		}
		scale--;
		power = power / 10;
	}

	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
	status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
		  (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
}

/**
 * rockchip_pcie_host_init_port - Initialize hardware
 * @rockchip: PCIe port information
 */
static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
{
	struct device *dev = rockchip->dev;
	int err, i = MAX_LANE_NUM;
	u32 status;
	int timeouts = 500;

	gpiod_set_value_cansleep(rockchip->ep_gpio, 0);

	err = rockchip_pcie_init_port(rockchip);
	if (err)
		return err;

	/* Fix the transmitted FTS count desired to exit from L0s. */
	status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
	status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
		 (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
	rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);

	rockchip_pcie_set_power_limit(rockchip);

	/* Set RC's clock architecture as common clock */
	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
	status |= PCI_EXP_LNKSTA_SLC << 16;
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);

	/* Set RC's RCB to 128 */
	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
	status |= PCI_EXP_LNKCTL_RCB;
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);

	/* Enable Gen1 training */
	rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
			    PCIE_CLIENT_CONFIG);

	gpiod_set_value_cansleep(rockchip->ep_gpio, 1);

	if (rockchip->wait_ep)
		timeouts = 10000;

	/* 500ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
				 status, PCIE_LINK_UP(status), 20,
				 timeouts * USEC_PER_MSEC);
	if (err) {
		dev_err(dev, "PCIe link training gen1 timeout!\n");
		goto err_power_off_phy;
	}

	err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0,
				 status, PCIE_LINK_IS_L0(status), 20,
				 timeouts * USEC_PER_MSEC);
	if (err) {
		dev_err(dev, "LTSSM is not L0!\n");
		return -ETIMEDOUT;
	}

	if (rockchip->link_gen == 2) {
		/*
		 * Enable retrain for gen2.  This should be configured only
		 * after gen1 finished.
		 */
		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
		status |= PCI_EXP_LNKCTL_RL;
		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);

		err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
					 status, PCIE_LINK_IS_GEN2(status), 20,
					 500 * USEC_PER_MSEC);
		if (err)
			dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
	}

	/* Check the final link width from negotiated lane counter from MGMT */
	status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
	status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
			 PCIE_CORE_PL_CONF_LANE_SHIFT);
	dev_dbg(dev, "current link width is x%d\n", status);

	/* Power off unused lane(s) */
	rockchip->lanes_map = rockchip_pcie_lane_map(rockchip);
	for (i = 0; i < MAX_LANE_NUM; i++) {
		if (!(rockchip->lanes_map & BIT(i))) {
			dev_dbg(dev, "idling lane %d\n", i);
			phy_power_off(rockchip->phys[i]);
		}
	}

	/* disable ltssm */
	if (rockchip->dma_trx_enabled)
		rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_DISABLE,
				    PCIE_CLIENT_CONFIG);

	rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
			    PCIE_CORE_CONFIG_VENDOR);
	rockchip_pcie_write(rockchip,
			    PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT,
			    PCIE_RC_CONFIG_RID_CCR);

	/* Clear THP cap's next cap pointer to remove L1 substate cap */
	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP);
	status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP);

	/* Clear L0s from RC's link cap */
	if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) {
		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP);
		status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
	}

	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
	status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
	status |= PCIE_RC_CONFIG_DCSR_MPS_256;
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);

	return 0;
err_power_off_phy:
	while (i--)
		phy_power_off(rockchip->phys[i]);
	i = MAX_LANE_NUM;
	while (i--)
		phy_exit(rockchip->phys[i]);
	return err;
}

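/*
 * Acknowledge a uDMA completion interrupt, count it, and wake up any waiter
 * once the table list has drained and the loop count threshold is reached.
 */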
static inline void
rockchip_pcie_handle_dma_interrupt(struct rockchip_pcie *rockchip)
{
	u32 dma_status;
	struct dma_trx_obj *obj = rockchip->dma_obj;

	dma_status = rockchip_pcie_read(rockchip,
			PCIE_APB_CORE_UDMA_BASE + PCIE_UDMA_INT_REG);

	/* Core: clear dma interrupt */
	rockchip_pcie_write(rockchip, dma_status,
			PCIE_APB_CORE_UDMA_BASE + PCIE_UDMA_INT_REG);

	WARN_ONCE(!(dma_status & 0x3), "dma_status 0x%x\n", dma_status);

	if (dma_status & (1 << 0)) {
		obj->irq_num++;
		obj->dma_free = true;
	}

	if (list_empty(&obj->tbl_list)) {
		if (obj->dma_free &&
		    obj->loop_count >= obj->loop_count_threshold)
			complete(&obj->done);
	}
}

static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
{
	struct rockchip_pcie *rockchip = arg;
	struct device *dev = rockchip->dev;
	u32 reg;
	u32 sub_reg;

	reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
	sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
	dev_dbg(dev, "reg = 0x%x, sub_reg = 0x%x\n", reg, sub_reg);
	if (reg & PCIE_CLIENT_INT_LOCAL) {
		dev_dbg(dev, "local interrupt received\n");
		if (sub_reg & PCIE_CORE_INT_PRFPE)
			dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n");

		if (sub_reg & PCIE_CORE_INT_CRFPE)
			dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n");

		if (sub_reg & PCIE_CORE_INT_RRPE)
			dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n");

		if (sub_reg & PCIE_CORE_INT_PRFO)
			dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n");

		if (sub_reg & PCIE_CORE_INT_CRFO)
			dev_dbg(dev, "overflow occurred in the completion receive FIFO\n");

		if (sub_reg & PCIE_CORE_INT_RT)
			dev_dbg(dev, "replay timer timed out\n");

		if (sub_reg & PCIE_CORE_INT_RTR)
			dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n");

		if (sub_reg & PCIE_CORE_INT_PE)
			dev_dbg(dev, "phy error detected on receive side\n");

		if (sub_reg & PCIE_CORE_INT_MTR)
			dev_dbg(dev, "malformed TLP received from the link\n");

		if (sub_reg & PCIE_CORE_INT_UCR)
			dev_dbg(dev, "malformed TLP received from the link\n");

		if (sub_reg & PCIE_CORE_INT_FCE)
			dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");

		if (sub_reg & PCIE_CORE_INT_CT)
			dev_dbg(dev, "a request timed out waiting for completion\n");

		if (sub_reg & PCIE_CORE_INT_UTC)
			dev_dbg(dev, "unmapped TC error\n");

		if (sub_reg & PCIE_CORE_INT_MMVC)
			dev_dbg(dev, "MSI mask register changes\n");

		rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS);
	} else if (reg & PCIE_CLIENT_INT_PHY) {
		dev_dbg(dev, "phy link changes\n");
		rockchip_pcie_update_txcredit_mui(rockchip);
		rockchip_pcie_clr_bw_int(rockchip);
	}

	if (reg & PCIE_CLIENT_INT_UDMA) {
		rockchip_pcie_write(rockchip, sub_reg, PCIE_CLIENT_INT_STATUS);
		rockchip_pcie_write(rockchip, reg, PCIE_CLIENT_INT_STATUS);
		rockchip_pcie_handle_dma_interrupt(rockchip);
	}

	rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL,
			    PCIE_CLIENT_INT_STATUS);

	return IRQ_HANDLED;
}

static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
{
	struct rockchip_pcie *rockchip = arg;
	struct device *dev = rockchip->dev;
	u32 reg;

	reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
	if (reg & PCIE_CLIENT_INT_LEGACY_DONE)
		dev_dbg(dev, "legacy done interrupt received\n");

	if (reg & PCIE_CLIENT_INT_MSG)
		dev_dbg(dev, "message done interrupt received\n");

	if (reg & PCIE_CLIENT_INT_HOT_RST)
		dev_dbg(dev, "hot reset interrupt received\n");

	if (reg & PCIE_CLIENT_INT_DPA)
		dev_dbg(dev, "dpa interrupt received\n");

	if (reg & PCIE_CLIENT_INT_FATAL_ERR)
		dev_dbg(dev, "fatal error interrupt received\n");

	if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
		dev_dbg(dev, "non-fatal error interrupt received\n");

	if (reg & PCIE_CLIENT_INT_CORR_ERR)
		dev_dbg(dev, "correctable error interrupt received\n");

	if (reg & PCIE_CLIENT_INT_PHY)
		dev_dbg(dev, "phy interrupt received\n");

	rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE |
			      PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST |
			      PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR |
			      PCIE_CLIENT_INT_NFATAL_ERR |
			      PCIE_CLIENT_INT_CORR_ERR |
			      PCIE_CLIENT_INT_PHY),
			    PCIE_CLIENT_INT_STATUS);

	return IRQ_HANDLED;
}

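/*
 * Chained handler for the legacy INTx interrupt: read the client status
 * register, then demultiplex each pending INTA-INTD bit to the virtual IRQ
 * mapped in the driver's INTx domain.
 */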
static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
	struct device *dev = rockchip->dev;
	u32 reg;
	u32 hwirq;
	u32 virq;

	chained_irq_enter(chip, desc);

	reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
	reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT;

	while (reg) {
		hwirq = ffs(reg) - 1;
		reg &= ~BIT(hwirq);

		virq = irq_find_mapping(rockchip->irq_domain, hwirq);
		if (virq)
			generic_handle_irq(virq);
		else
			dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
	}

	chained_irq_exit(chip, desc);
}

static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip)
{
	int irq, err;
	struct device *dev = rockchip->dev;
	struct platform_device *pdev = to_platform_device(dev);

	irq = platform_get_irq_byname(pdev, "sys");
	if (irq < 0)
		return irq;

	err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
			       IRQF_SHARED, "pcie-sys", rockchip);
	if (err) {
		dev_err(dev, "failed to request PCIe subsystem IRQ\n");
		return err;
	}

	irq = platform_get_irq_byname(pdev, "legacy");
	if (irq < 0)
		return irq;

	irq_set_chained_handler_and_data(irq,
					 rockchip_pcie_legacy_int_handler,
					 rockchip);

	irq = platform_get_irq_byname(pdev, "client");
	if (irq < 0)
		return irq;

	err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
			       IRQF_SHARED, "pcie-client", rockchip);
	if (err) {
		dev_err(dev, "failed to request PCIe client IRQ\n");
		return err;
	}

	return 0;
}

/**
 * rockchip_pcie_parse_host_dt - Parse Device Tree
 * @rockchip: PCIe port information
 *
 * Return: '0' on success and error value on failure
 */
static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
{
	struct device *dev = rockchip->dev;
	int err;

	err = rockchip_pcie_parse_dt(rockchip);
	if (err)
		return err;

	rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
	if (IS_ERR(rockchip->vpcie12v)) {
		if (PTR_ERR(rockchip->vpcie12v) != -ENODEV)
			return PTR_ERR(rockchip->vpcie12v);
		dev_info(dev, "no vpcie12v regulator found\n");
	}

	rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
	if (IS_ERR(rockchip->vpcie3v3)) {
		if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
			return PTR_ERR(rockchip->vpcie3v3);
		dev_info(dev, "no vpcie3v3 regulator found\n");
	}

	rockchip->vpcie1v8 = devm_regulator_get(dev, "vpcie1v8");
	if (IS_ERR(rockchip->vpcie1v8))
		return PTR_ERR(rockchip->vpcie1v8);

	rockchip->vpcie0v9 = devm_regulator_get(dev, "vpcie0v9");
	if (IS_ERR(rockchip->vpcie0v9))
		return PTR_ERR(rockchip->vpcie0v9);

	return 0;
}

static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
{
	struct device *dev = rockchip->dev;
	int err;

	if (!IS_ERR(rockchip->vpcie12v)) {
		err = regulator_enable(rockchip->vpcie12v);
		if (err) {
			dev_err(dev, "failed to enable vpcie12v regulator\n");
			goto err_out;
		}
	}

	if (!IS_ERR(rockchip->vpcie3v3)) {
		err = regulator_enable(rockchip->vpcie3v3);
		if (err) {
			dev_err(dev, "failed to enable vpcie3v3 regulator\n");
			goto err_disable_12v;
		}
	}

	err = regulator_enable(rockchip->vpcie1v8);
	if (err) {
		dev_err(dev, "failed to enable vpcie1v8 regulator\n");
		goto err_disable_3v3;
	}

	err = regulator_enable(rockchip->vpcie0v9);
	if (err) {
		dev_err(dev, "failed to enable vpcie0v9 regulator\n");
		goto err_disable_1v8;
	}

	return 0;

err_disable_1v8:
	regulator_disable(rockchip->vpcie1v8);
err_disable_3v3:
	if (!IS_ERR(rockchip->vpcie3v3))
		regulator_disable(rockchip->vpcie3v3);
err_disable_12v:
	if (!IS_ERR(rockchip->vpcie12v))
		regulator_disable(rockchip->vpcie12v);
err_out:
	return err;
}

static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip)
{
	rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) &
			    (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK);
	rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT),
			    PCIE_CORE_INT_MASK);

	rockchip_pcie_enable_bw_int(rockchip);
	rockchip_pcie_write(rockchip, PCIE_UDMA_INT_ENABLE_MASK,
			    PCIE_APB_CORE_UDMA_BASE + PCIE_UDMA_INT_ENABLE_REG);
}

static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				  irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = rockchip_pcie_intx_map,
};

static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
{
	struct device *dev = rockchip->dev;
	struct device_node *intc = of_get_next_child(dev->of_node, NULL);

	if (!intc) {
		dev_err(dev, "missing child interrupt-controller node\n");
		return -EINVAL;
	}

	rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
						     &intx_domain_ops, rockchip);
	of_node_put(intc);
	if (!rockchip->irq_domain) {
		dev_err(dev, "failed to get an INTx IRQ domain\n");
		return -EINVAL;
	}

	return 0;
}

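/*
 * Program one outbound AXI-to-PCIe address translation region. The
 * num_pass_bits field holds the number of passed-through address bits minus
 * one, so a call with 20 - 1 maps a 1 MiB window.
 */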
static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
				     int region_no, int type, u8 num_pass_bits,
				     u32 lower_addr, u32 upper_addr)
{
	u32 ob_addr_0;
	u32 ob_addr_1;
	u32 ob_desc_0;
	u32 aw_offset;

	if (region_no >= MAX_AXI_WRAPPER_REGION_NUM)
		return -EINVAL;
	if (num_pass_bits + 1 < 8)
		return -EINVAL;
	if (num_pass_bits > 63)
		return -EINVAL;
	if (region_no == 0) {
		if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
			return -EINVAL;
	}
	if (region_no != 0) {
		if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
			return -EINVAL;
	}

	aw_offset = (region_no << OB_REG_SIZE_SHIFT);

	ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS;
	ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR;
	ob_addr_1 = upper_addr;
	ob_desc_0 = (1 << 23 | type);

	rockchip_pcie_write(rockchip, ob_addr_0,
			    PCIE_CORE_OB_REGION_ADDR0 + aw_offset);
	rockchip_pcie_write(rockchip, ob_addr_1,
			    PCIE_CORE_OB_REGION_ADDR1 + aw_offset);
	rockchip_pcie_write(rockchip, ob_desc_0,
			    PCIE_CORE_OB_REGION_DESC0 + aw_offset);
	rockchip_pcie_write(rockchip, 0,
			    PCIE_CORE_OB_REGION_DESC1 + aw_offset);

	return 0;
}

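/*
 * Program one inbound PCIe-to-AXI translation region for the root port,
 * using the same num_pass_bits convention as the outbound helper.
 */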
static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
				     int region_no, u8 num_pass_bits,
				     u32 lower_addr, u32 upper_addr)
{
	u32 ib_addr_0;
	u32 ib_addr_1;
	u32 aw_offset;

	if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM)
		return -EINVAL;
	if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
		return -EINVAL;
	if (num_pass_bits > 63)
		return -EINVAL;

	aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT);

	ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS;
	ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR;
	ib_addr_1 = upper_addr;

	rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset);
	rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset);

	return 0;
}

static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
{
	struct device *dev = rockchip->dev;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rockchip);
	struct resource_entry *entry;
	u64 pci_addr, size;
	int offset;
	int err;
	int reg_no;

	rockchip_pcie_cfg_configuration_accesses(rockchip,
						 AXI_WRAPPER_TYPE0_CFG);
	entry = resource_list_first_type(&bridge->windows, IORESOURCE_MEM);
	if (!entry)
		return -ENODEV;

	size = resource_size(entry->res);
	pci_addr = entry->res->start - entry->offset;
	rockchip->msg_bus_addr = pci_addr;

	for (reg_no = 0; reg_no < (size >> 20); reg_no++) {
		err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
						AXI_WRAPPER_MEM_WRITE,
						20 - 1,
						pci_addr + (reg_no << 20),
						0);
		if (err) {
			dev_err(dev, "program RC mem outbound ATU failed\n");
			return err;
		}
	}

	/* Workaround for PCIe DMA transfer */
	if (rockchip->dma_trx_enabled) {
		rockchip_pcie_prog_ob_atu(rockchip, 1, AXI_WRAPPER_MEM_WRITE,
					  32 - 1, rockchip->mem_reserve_start, 0x0);
	}

	err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
	if (err) {
		dev_err(dev, "program RC mem inbound ATU failed\n");
		return err;
	}

	entry = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
	if (!entry)
		return -ENODEV;

	/* store the register number offset to program RC io outbound ATU */
	offset = size >> 20;

	size = resource_size(entry->res);
	pci_addr = entry->res->start - entry->offset;

	for (reg_no = 0; reg_no < (size >> 20); reg_no++) {
		err = rockchip_pcie_prog_ob_atu(rockchip,
						reg_no + 1 + offset,
						AXI_WRAPPER_IO_WRITE,
						20 - 1,
						pci_addr + (reg_no << 20),
						0);
		if (err) {
			dev_err(dev, "program RC io outbound ATU failed\n");
			return err;
		}
	}

	/* assign message regions */
	rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset,
				  AXI_WRAPPER_NOR_MSG,
				  20 - 1, 0, 0);

	rockchip->msg_bus_addr += ((reg_no + offset) << 20);
	rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
	if (!rockchip->msg_region)
		err = -ENOMEM;
	return err;
}

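/*
 * Put the link into L2: send the PME_Turn_Off message through the message
 * region and poll the LTSSM until it reports L2, with a 5 second timeout.
 */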
static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
{
	u32 value;
	int err;

	/* Don't enter L2 state when no ep connected */
	if (rockchip->dma_trx_enabled == 1)
		return 0;

	/* send PME_TURN_OFF message */
	writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF);

	/* read LTSSM and wait for falling into L2 link state */
	err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0,
				 value, PCIE_LINK_IS_L2(value), 20,
				 jiffies_to_usecs(5 * HZ));
	if (err) {
		dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n");
		return err;
	}

	return 0;
}

static int rockchip_pcie_suspend_for_user(struct device *dev)
{
	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
	int ret;

	/* disable core and cli int since we don't need to ack PME_ACK */
	rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) |
			    PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK);
	rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK);

	ret = rockchip_pcie_wait_l2(rockchip);
	if (ret) {
		rockchip_pcie_enable_interrupts(rockchip);
		return ret;
	}

	/* disable ltssm */
	rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_DISABLE,
			    PCIE_CLIENT_CONFIG);

	rockchip_pcie_deinit_phys(rockchip);

	return ret;
}

static int rockchip_pcie_resume_for_user(struct device *dev)
{
	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
	int err;

	err = rockchip_pcie_host_init_port(rockchip);
	if (err)
		return err;

	err = rockchip_pcie_cfg_atu(rockchip);
	if (err)
		return err;

	/* Need this to enter L1 again */
	rockchip_pcie_update_txcredit_mui(rockchip);
	rockchip_pcie_enable_interrupts(rockchip);

	return 0;
}

static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
{
	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
	int ret = 0;

	if (!rockchip->dma_trx_enabled)
		ret = rockchip_pcie_suspend_for_user(dev);

	rockchip_pcie_disable_clocks(rockchip);

	regulator_disable(rockchip->vpcie0v9);

	return ret;
}

static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
{
	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
	int err;

	err = regulator_enable(rockchip->vpcie0v9);
	if (err) {
		dev_err(dev, "failed to enable vpcie0v9 regulator\n");
		return err;
	}

	err = rockchip_pcie_enable_clocks(rockchip);
	if (err)
		goto err_disable_0v9;

	if (!rockchip->dma_trx_enabled)
		err = rockchip_pcie_resume_for_user(dev);
	if (err)
		goto err_disable_clocks;

	return 0;

err_disable_clocks:
	rockchip_pcie_disable_clocks(rockchip);
err_disable_0v9:
	regulator_disable(rockchip->vpcie0v9);

	return err;
}

static int rockchip_pcie_really_probe(struct rockchip_pcie *rockchip)
{
	int err;

	err = rockchip_pcie_host_init_port(rockchip);
	if (err)
		return err;

	err = rockchip_pcie_setup_irq(rockchip);
	if (err)
		return err;

	rockchip_pcie_enable_interrupts(rockchip);

	err = rockchip_pcie_cfg_atu(rockchip);
	if (err)
		return err;

	rockchip->bridge->sysdata = rockchip;
	rockchip->bridge->ops = &rockchip_pcie_ops;

	device_init_wakeup(rockchip->dev, true);

	return pci_host_probe(rockchip->bridge);
}

static ssize_t pcie_deferred_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	u32 val = 0;
	int err;
	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);

	err = kstrtou32(buf, 10, &val);
	if (err)
		return err;

	if (val) {
		rockchip->wait_ep = 1;
		err = rockchip_pcie_really_probe(rockchip);
		if (err)
			return -EINVAL;
	}

	return size;
}

static ssize_t pcie_reset_ep_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	u32 val = 0;
	int err;
	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
	struct dma_trx_obj *obj = rockchip->dma_obj;

	dev_info(dev, "loop_count = %d\n", obj->loop_count);

	err = kstrtou32(buf, 10, &val);
	if (err)
		return err;

	if (val == PCIE_USER_UNLINK)
		rockchip_pcie_suspend_for_user(rockchip->dev);
	else if (val == PCIE_USER_RELINK)
		rockchip_pcie_resume_for_user(rockchip->dev);
	else
		return -EINVAL;

	return size;
}

static DEVICE_ATTR_WO(pcie_deferred);
static DEVICE_ATTR_WO(pcie_reset_ep);

static struct attribute *pcie_attrs[] = {
	&dev_attr_pcie_deferred.attr,
	&dev_attr_pcie_reset_ep.attr,
	NULL
};

static const struct attribute_group pcie_attr_group = {
	.attrs = pcie_attrs,
};

static int rockchip_pcie_probe(struct platform_device *pdev)
{
	struct rockchip_pcie *rockchip;
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	int err;

	if (!dev->of_node)
		return -ENODEV;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip));
	if (!bridge)
		return -ENOMEM;

	rockchip = pci_host_bridge_priv(bridge);

	rockchip->bridge = bridge;

	platform_set_drvdata(pdev, rockchip);
	rockchip->dev = dev;
	rockchip->is_rc = true;

	err = rockchip_pcie_parse_host_dt(rockchip);
	if (err)
		return err;

	err = rockchip_pcie_enable_clocks(rockchip);
	if (err)
		return err;

	err = rockchip_pcie_set_vpcie(rockchip);
	if (err) {
		dev_err(dev, "failed to set vpcie regulator\n");
		goto err_set_vpcie;
	}

	err = rockchip_pcie_init_irq_domain(rockchip);
	if (err < 0)
		goto err_vpcie;

	if (rockchip->deferred) {
		err = sysfs_create_group(&pdev->dev.kobj, &pcie_attr_group);
		if (err) {
			dev_err(&pdev->dev, "SysFS group creation failed\n");
			goto err_remove_irq_domain;
		}
	} else {
		err = rockchip_pcie_really_probe(rockchip);
		if (err) {
			dev_err(&pdev->dev, "host probe failed\n");
			goto err_deinit_port;
		}
	}

	if (rockchip->dma_trx_enabled == 0)
		return 0;

	rockchip->dma_obj = rk_pcie_dma_obj_probe(dev);
	if (IS_ERR(rockchip->dma_obj)) {
		dev_err(dev, "failed to prepare dma object\n");
		err = -EINVAL;
		goto err_deinit_port;
	}

	if (rockchip->dma_obj) {
		rockchip->dma_obj->start_dma_func = rk_pcie_start_dma_rk3399;
		rockchip->dma_obj->config_dma_func = rk_pcie_config_dma_rk3399;
	}

	return 0;

err_deinit_port:
	rockchip_pcie_deinit_phys(rockchip);
	if (rockchip->deferred)
		sysfs_remove_group(&pdev->dev.kobj, &pcie_attr_group);
err_remove_irq_domain:
	irq_domain_remove(rockchip->irq_domain);
err_vpcie:
	if (!IS_ERR(rockchip->vpcie12v))
		regulator_disable(rockchip->vpcie12v);
	if (!IS_ERR(rockchip->vpcie3v3))
		regulator_disable(rockchip->vpcie3v3);
	regulator_disable(rockchip->vpcie1v8);
	regulator_disable(rockchip->vpcie0v9);
err_set_vpcie:
	rockchip_pcie_disable_clocks(rockchip);
	return err;
}

static int rockchip_pcie_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
	u32 status1, status2;
	u32 status;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rockchip);

	status1 = rockchip_pcie_read(rockchip, PCIE_CLIENT_BASIC_STATUS1);
	status2 = rockchip_pcie_read(rockchip, PCIE_CLIENT_DEBUG_OUT_0);

	if (!PCIE_LINK_UP(status1) || !PCIE_LINK_IS_L0(status2))
		rockchip->in_remove = 1;

	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	irq_domain_remove(rockchip->irq_domain);

	/* disable link state */
	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
	status |= BIT(4);
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);

	mdelay(1);

	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
	status &= ~BIT(4);
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);

	rockchip_pcie_deinit_phys(rockchip);

	rockchip_pcie_disable_clocks(rockchip);

	if (rockchip->dma_trx_enabled)
		rk_pcie_dma_obj_remove(rockchip->dma_obj);

	if (rockchip->deferred)
		sysfs_remove_group(&pdev->dev.kobj, &pcie_attr_group);

	if (!IS_ERR(rockchip->vpcie12v))
		regulator_disable(rockchip->vpcie12v);
	if (!IS_ERR(rockchip->vpcie3v3))
		regulator_disable(rockchip->vpcie3v3);
	regulator_disable(rockchip->vpcie1v8);
	regulator_disable(rockchip->vpcie0v9);

	device_init_wakeup(rockchip->dev, false);

	return 0;
}

static const struct dev_pm_ops rockchip_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
				      rockchip_pcie_resume_noirq)
};

static const struct of_device_id rockchip_pcie_of_match[] = {
	{ .compatible = "rockchip,rk3399-pcie", },
	{}
};
MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match);

static struct platform_driver rockchip_pcie_driver = {
	.driver = {
		.name = "rockchip-pcie",
		.of_match_table = rockchip_pcie_of_match,
		.pm = &rockchip_pcie_pm_ops,
	},
	.probe = rockchip_pcie_probe,
	.remove = rockchip_pcie_remove,
};
module_platform_driver(rockchip_pcie_driver);

MODULE_AUTHOR("Rockchip Inc");
MODULE_DESCRIPTION("Rockchip AXI PCIe driver");
MODULE_LICENSE("GPL v2");