// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe EP controller driver for Rockchip SoCs
 *
 * Copyright (C) 2021 Rockchip Electronics Co., Ltd.
 * http://www.rock-chips.com
 *
 * Author: Simon Xue <xxm@rock-chips.com>
 */

#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/miscdevice.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/uaccess.h>
#include <uapi/linux/rk-pcie-ep.h>

#include "../rockchip-pcie-dma.h"
#include "pcie-designware.h"
#include "pcie-dw-dmatest.h"

/*
 * The upper 16 bits of PCIE_CLIENT_CONFIG are a write
 * mask for the lower 16 bits.
 */
#define HIWORD_UPDATE(mask, val)	(((mask) << 16) | (val))
#define HIWORD_UPDATE_BIT(val)		HIWORD_UPDATE(val, val)
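/*
 * For example, HIWORD_UPDATE_BIT(0xc) expands to 0x000c000c: it sets bits
 * [3:2] and the matching write-enable bits [19:18] in a single write.
 */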

#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)

#define PCIE_DMA_OFFSET			0x380000

#define PCIE_DMA_CTRL_OFF		0x8
#define PCIE_DMA_WR_ENB			0xc
#define PCIE_DMA_WR_CTRL_LO		0x200
#define PCIE_DMA_WR_CTRL_HI		0x204
#define PCIE_DMA_WR_XFERSIZE		0x208
#define PCIE_DMA_WR_SAR_PTR_LO		0x20c
#define PCIE_DMA_WR_SAR_PTR_HI		0x210
#define PCIE_DMA_WR_DAR_PTR_LO		0x214
#define PCIE_DMA_WR_DAR_PTR_HI		0x218
#define PCIE_DMA_WR_WEILO		0x18
#define PCIE_DMA_WR_WEIHI		0x1c
#define PCIE_DMA_WR_DOORBELL		0x10
#define PCIE_DMA_WR_INT_STATUS		0x4c
#define PCIE_DMA_WR_INT_MASK		0x54
#define PCIE_DMA_WR_INT_CLEAR		0x58

#define PCIE_DMA_RD_ENB			0x2c
#define PCIE_DMA_RD_CTRL_LO		0x300
#define PCIE_DMA_RD_CTRL_HI		0x304
#define PCIE_DMA_RD_XFERSIZE		0x308
#define PCIE_DMA_RD_SAR_PTR_LO		0x30c
#define PCIE_DMA_RD_SAR_PTR_HI		0x310
#define PCIE_DMA_RD_DAR_PTR_LO		0x314
#define PCIE_DMA_RD_DAR_PTR_HI		0x318
#define PCIE_DMA_RD_WEILO		0x38
#define PCIE_DMA_RD_WEIHI		0x3c
#define PCIE_DMA_RD_DOORBELL		0x30
#define PCIE_DMA_RD_INT_STATUS		0xa0
#define PCIE_DMA_RD_INT_MASK		0xa8
#define PCIE_DMA_RD_INT_CLEAR		0xac

#define PCIE_DMA_CHANEL_MAX_NUM		2

#define PCIE_CLIENT_RC_MODE		HIWORD_UPDATE_BIT(0x40)
#define PCIE_CLIENT_ENABLE_LTSSM	HIWORD_UPDATE_BIT(0xc)
#define PCIE_CLIENT_INTR_STATUS_MISC	0x10
#define PCIE_SMLH_LINKUP		BIT(16)
#define PCIE_RDLH_LINKUP		BIT(17)
#define PCIE_L0S_ENTRY			0x11
#define PCIE_CLIENT_GENERAL_CONTROL	0x0
#define PCIE_CLIENT_GENERAL_DEBUG	0x104
#define PCIE_CLIENT_HOT_RESET_CTRL	0x180
#define PCIE_CLIENT_LTSSM_STATUS	0x300
#define PCIE_CLIENT_INTR_MASK		0x24
#define PCIE_LTSSM_APP_DLY1_EN		BIT(0)
#define PCIE_LTSSM_APP_DLY2_EN		BIT(1)
#define PCIE_LTSSM_APP_DLY1_DONE	BIT(2)
#define PCIE_LTSSM_APP_DLY2_DONE	BIT(3)
#define PCIE_LTSSM_ENABLE_ENHANCE	BIT(4)
#define PCIE_CLIENT_MSI_GEN_CON		0x38

#define PCIe_CLIENT_MSI_OBJ_IRQ		0	/* rockchip ep object special irq */

#define PCIE_ELBI_REG_NUM		0x2
#define PCIE_ELBI_LOCAL_BASE		0x200e00

#define PCIE_ELBI_APP_ELBI_INT_GEN0		0x0
#define PCIE_ELBI_APP_ELBI_INT_GEN0_SIGIO	BIT(0)

#define PCIE_ELBI_APP_ELBI_INT_GEN1		0x4

#define PCIE_ELBI_LOCAL_ENABLE_OFF	0x8

#define PCIE_DIRECT_SPEED_CHANGE	BIT(17)

#define PCIE_TYPE0_STATUS_COMMAND_REG	0x4
#define PCIE_TYPE0_HDR_DBI2_OFFSET	0x100000

#define PCIE_DBI_SIZE			0x400000

#define PCIE_EP_OBJ_INFO_DRV_VERSION	0x00000001

#define PCIE_BAR_MAX_NUM		6
#define PCIE_HOTRESET_TMOUT_US		10000

struct rockchip_pcie {
	struct dw_pcie			pci;
	void __iomem			*apb_base;
	struct phy			*phy;
	struct clk_bulk_data		*clks;
	unsigned int			clk_cnt;
	struct reset_control		*rst;
	struct gpio_desc		*rst_gpio;
	struct regulator		*vpcie3v3;
	unsigned long			*ib_window_map;
	unsigned long			*ob_window_map;
	u32				num_ib_windows;
	u32				num_ob_windows;
	phys_addr_t			*outbound_addr;
	u8				bar_to_atu[PCIE_BAR_MAX_NUM];
	dma_addr_t			ib_target_address[PCIE_BAR_MAX_NUM];
	u32				ib_target_size[PCIE_BAR_MAX_NUM];
	void				*ib_target_base[PCIE_BAR_MAX_NUM];
	struct dma_trx_obj		*dma_obj;
	struct fasync_struct		*async;
	phys_addr_t			dbi_base_physical;
	struct pcie_ep_obj_info		*obj_info;
	enum pcie_ep_mmap_resource	cur_mmap_res;
	struct workqueue_struct		*hot_rst_wq;
	struct work_struct		hot_rst_work;
};

struct rockchip_pcie_misc_dev {
	struct miscdevice dev;
	struct rockchip_pcie *pcie;
};

static const struct of_device_id rockchip_pcie_ep_of_match[] = {
	{
		.compatible = "rockchip,rk3568-pcie-std-ep",
	},
	{
		.compatible = "rockchip,rk3588-pcie-std-ep",
	},
	{},
};

MODULE_DEVICE_TABLE(of, rockchip_pcie_ep_of_match);

static void rockchip_pcie_devmode_update(struct rockchip_pcie *rockchip, int mode, int submode)
{
	rockchip->obj_info->devmode.mode = mode;
	rockchip->obj_info->devmode.submode = submode;
}

static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip, u32 reg)
{
	return readl(rockchip->apb_base + reg);
}

static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip, u32 val, u32 reg)
{
	writel(val, rockchip->apb_base + reg);
}

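/*
 * Map a physical (reserved-memory) region into the kernel: build a page
 * array for the range and vmap() it with a non-cached protection.
 */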
static void *rockchip_pcie_map_kernel(phys_addr_t start, size_t len)
{
	int i;
	void *vaddr;
	pgprot_t pgprot;
	phys_addr_t phys;
	int npages = PAGE_ALIGN(len) / PAGE_SIZE;
	struct page **p = vmalloc(sizeof(struct page *) * npages);

	if (!p)
		return NULL;

	pgprot = pgprot_noncached(PAGE_KERNEL);

	phys = start;
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(phys);
		phys += PAGE_SIZE;
	}

	vaddr = vmap(p, npages, VM_MAP, pgprot);
	vfree(p);

	return vaddr;
}

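/*
 * Parse DT resources: ioremap the "pcie-dbi" and "pcie-apb" regions, take the
 * optional reset GPIO, read the iATU window counts, and map the per-BAR
 * "memory-region" areas ("bar0".."bar5") used as inbound targets. The bar0
 * region also carries the shared pcie_ep_obj_info block.
 */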
static int rockchip_pcie_resource_get(struct platform_device *pdev,
				      struct rockchip_pcie *rockchip)
{
	int ret;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	void *addr;
	struct resource *dbi_base;
	struct device_node *mem;
	struct resource reg;
	char name[8];
	int i, idx;

	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						"pcie-dbi");
	if (!dbi_base) {
		dev_err(&pdev->dev, "get pcie-dbi failed\n");
		return -ENODEV;
	}

	rockchip->pci.dbi_base = devm_ioremap_resource(dev, dbi_base);
	if (IS_ERR(rockchip->pci.dbi_base))
		return PTR_ERR(rockchip->pci.dbi_base);
	rockchip->pci.atu_base = rockchip->pci.dbi_base + DEFAULT_DBI_ATU_OFFSET;
	rockchip->dbi_base_physical = dbi_base->start;

	rockchip->apb_base = devm_platform_ioremap_resource_byname(pdev, "pcie-apb");
	if (IS_ERR(rockchip->apb_base)) {
		dev_err(dev, "get pcie-apb failed\n");
		return PTR_ERR(rockchip->apb_base);
	}

	rockchip->rst_gpio = devm_gpiod_get_optional(dev, "reset",
						     GPIOD_OUT_HIGH);
	if (IS_ERR(rockchip->rst_gpio))
		return PTR_ERR(rockchip->rst_gpio);

	ret = device_property_read_u32(dev, "num-ib-windows", &rockchip->num_ib_windows);
	if (ret < 0) {
		dev_err(dev, "unable to read *num-ib-windows* property\n");
		return ret;
	}

	if (rockchip->num_ib_windows > MAX_IATU_IN) {
		dev_err(dev, "Invalid *num-ib-windows*\n");
		return -EINVAL;
	}

	ret = device_property_read_u32(dev, "num-ob-windows", &rockchip->num_ob_windows);
	if (ret < 0) {
		dev_err(dev, "Unable to read *num-ob-windows* property\n");
		return ret;
	}

	if (rockchip->num_ob_windows > MAX_IATU_OUT) {
		dev_err(dev, "Invalid *num-ob-windows*\n");
		return -EINVAL;
	}

	rockchip->ib_window_map = devm_kcalloc(dev,
					       BITS_TO_LONGS(rockchip->num_ib_windows),
					       sizeof(long), GFP_KERNEL);
	if (!rockchip->ib_window_map)
		return -ENOMEM;

	rockchip->ob_window_map = devm_kcalloc(dev,
					       BITS_TO_LONGS(rockchip->num_ob_windows),
					       sizeof(long), GFP_KERNEL);
	if (!rockchip->ob_window_map)
		return -ENOMEM;

	addr = devm_kcalloc(dev, rockchip->num_ob_windows, sizeof(phys_addr_t),
			    GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	rockchip->outbound_addr = addr;

	for (i = 0; i < PCIE_BAR_MAX_NUM; i++) {
		snprintf(name, sizeof(name), "bar%d", i);
		idx = of_property_match_string(np, "memory-region-names", name);
		if (idx < 0)
			continue;

		mem = of_parse_phandle(np, "memory-region", idx);
		if (!mem) {
			dev_err(dev, "missing \"memory-region\" %s property\n", name);
			return -ENODEV;
		}

		ret = of_address_to_resource(mem, 0, &reg);
		if (ret < 0) {
			dev_err(dev, "missing \"reg\" %s property\n", name);
			return -ENODEV;
		}

		rockchip->ib_target_address[i] = reg.start;
		rockchip->ib_target_size[i] = resource_size(&reg);
		rockchip->ib_target_base[i] = rockchip_pcie_map_kernel(reg.start,
								       resource_size(&reg));
		dev_info(dev, "%s: assigned [0x%llx-%llx]\n", name, rockchip->ib_target_address[i],
			 rockchip->ib_target_address[i] + rockchip->ib_target_size[i] - 1);
	}

	if (rockchip->ib_target_size[0]) {
		rockchip->obj_info = (struct pcie_ep_obj_info *)rockchip->ib_target_base[0];
		memset_io(rockchip->obj_info, 0, sizeof(struct pcie_ep_obj_info));
		rockchip->obj_info->magic = PCIE_EP_OBJ_INFO_MAGIC;
		rockchip->obj_info->version = PCIE_EP_OBJ_INFO_DRV_VERSION;
		rockchip_pcie_devmode_update(rockchip, RKEP_MODE_KERNEL, RKEP_SMODE_INIT);
	} else {
		dev_err(dev, "missing bar0 memory region\n");
		return -ENODEV;
	}

	return 0;
}

static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
{
	/* Set ep mode */
	rockchip_pcie_writel_apb(rockchip, 0xf00000, 0x0);
	rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
				 PCIE_CLIENT_GENERAL_CONTROL);
}

static int rockchip_pcie_link_up(struct dw_pcie *pci)
{
	struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
	u32 val = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);

	if ((val & (PCIE_RDLH_LINKUP | PCIE_SMLH_LINKUP)) == 0x30000)
		return 1;

	return 0;
}

static int rockchip_pcie_start_link(struct dw_pcie *pci)
{
	struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);

	/* Reset device */
	gpiod_set_value_cansleep(rockchip->rst_gpio, 0);
	msleep(100);
	gpiod_set_value_cansleep(rockchip->rst_gpio, 1);

	rockchip_pcie_enable_ltssm(rockchip);

	return 0;
}

static int rockchip_pcie_phy_init(struct rockchip_pcie *rockchip)
{
	int ret;
	struct device *dev = rockchip->pci.dev;

	rockchip->phy = devm_phy_get(dev, "pcie-phy");
	if (IS_ERR(rockchip->phy)) {
		dev_err(dev, "missing phy\n");
		return PTR_ERR(rockchip->phy);
	}

	ret = phy_init(rockchip->phy);
	if (ret < 0)
		return ret;

	phy_power_on(rockchip->phy);

	return 0;
}

static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
{
	phy_exit(rockchip->phy);
	phy_power_off(rockchip->phy);
}

static int rockchip_pcie_reset_control_release(struct rockchip_pcie *rockchip)
{
	struct device *dev = rockchip->pci.dev;
	int ret;

	rockchip->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(rockchip->rst)) {
		dev_err(dev, "failed to get reset lines\n");
		return PTR_ERR(rockchip->rst);
	}

	ret = reset_control_deassert(rockchip->rst);

	return ret;
}

static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
{
	struct device *dev = rockchip->pci.dev;
	int ret;

	ret = devm_clk_bulk_get_all(dev, &rockchip->clks);
	if (ret < 0)
		return ret;

	rockchip->clk_cnt = ret;

	ret = clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
	if (ret)
		return ret;

	return 0;
}

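/*
 * Walk the PCIe extended capability list in DBI space and return the offset
 * of the Resizable BAR capability, or 0 if it is not present.
 */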
static int rockchip_pci_find_resbar_capability(struct rockchip_pcie *rockchip)
{
	u32 header;
	int ttl;
	int start = 0;
	int pos = PCI_CFG_SPACE_SIZE;
	int cap = PCI_EXT_CAP_ID_REBAR;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	header = dw_pcie_readl_dbi(&rockchip->pci, pos);

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		header = dw_pcie_readl_dbi(&rockchip->pci, pos);
		if (!header)
			break;
	}

	return 0;
}

static int rockchip_pcie_ep_set_bar_flag(struct rockchip_pcie *rockchip, enum pci_barno barno,
					 int flags)
{
	enum pci_barno bar = barno;
	u32 reg;

	reg = PCI_BASE_ADDRESS_0 + (4 * bar);

	/* Disable the upper 32-bit BAR to form a 64-bit BAR pair */
	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
		dw_pcie_writel_dbi(&rockchip->pci, reg + PCIE_TYPE0_HDR_DBI2_OFFSET + 4, 0);

	dw_pcie_writel_dbi(&rockchip->pci, reg, flags);
	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
		dw_pcie_writel_dbi(&rockchip->pci, reg + 4, 0);

	return 0;
}

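/*
 * Function-0 config setup over DBI: program the lane count and link
 * width/speed change, enable memory space and bus mastering, size BAR0/2/4
 * through the Resizable BAR capability and disable BAR1/5 via the DBI2
 * shadow registers.
 */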
static void rockchip_pcie_resize_bar(struct rockchip_pcie *rockchip)
{
	struct dw_pcie *pci = &rockchip->pci;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	int bar, ret;
	u32 resbar_base, lanes, val;

	ret = of_property_read_u32(np, "num-lanes", &lanes);
	if (ret)
		lanes = 0;

	/* Set the number of lanes */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_MODE_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(dev, "num-lanes %u: invalid value\n", lanes);
		return;
	}

	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

	/* Set link width speed control register */
	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}

	val |= PCIE_DIRECT_SPEED_CHANGE;

	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	/* Enable bus master and memory space */
	dw_pcie_writel_dbi(pci, PCIE_TYPE0_STATUS_COMMAND_REG, 0x6);

	resbar_base = rockchip_pci_find_resbar_capability(rockchip);

	/* Resize BAR0 to 4 MB 32-bit, BAR2 to 64 MB 64-bit prefetchable, BAR4 to 1 MB 32-bit */
	bar = BAR_0;
	dw_pcie_writel_dbi(pci, resbar_base + 0x4 + bar * 0x8, 0xfffff0);
	dw_pcie_writel_dbi(pci, resbar_base + 0x8 + bar * 0x8, 0x2c0);
	rockchip_pcie_ep_set_bar_flag(rockchip, bar, PCI_BASE_ADDRESS_MEM_TYPE_32);

	bar = BAR_2;
	dw_pcie_writel_dbi(pci, resbar_base + 0x4 + bar * 0x8, 0xfffff0);
	dw_pcie_writel_dbi(pci, resbar_base + 0x8 + bar * 0x8, 0x6c0);
	rockchip_pcie_ep_set_bar_flag(rockchip, bar,
				      PCI_BASE_ADDRESS_MEM_PREFETCH | PCI_BASE_ADDRESS_MEM_TYPE_64);

	bar = BAR_4;
	dw_pcie_writel_dbi(pci, resbar_base + 0x4 + bar * 0x8, 0xfffff0);
	dw_pcie_writel_dbi(pci, resbar_base + 0x8 + bar * 0x8, 0xc0);
	rockchip_pcie_ep_set_bar_flag(rockchip, bar, PCI_BASE_ADDRESS_MEM_TYPE_32);

	/* Disable BAR1 and BAR5 */
	bar = BAR_1;
	dw_pcie_writel_dbi(pci, PCIE_TYPE0_HDR_DBI2_OFFSET + 0x10 + bar * 4, 0);
	bar = BAR_5;
	dw_pcie_writel_dbi(pci, PCIE_TYPE0_HDR_DBI2_OFFSET + 0x10 + bar * 4, 0);
}

static void rockchip_pcie_init_id(struct rockchip_pcie *rockchip)
{
	struct dw_pcie *pci = &rockchip->pci;

	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, 0x356a);
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, 0x0580);
}

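/*
 * Claim a free inbound iATU window and program it in BAR-match mode so that
 * host accesses to @bar land at @cpu_addr.
 */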
static int rockchip_pcie_ep_set_bar(struct rockchip_pcie *rockchip, enum pci_barno bar,
				    dma_addr_t cpu_addr)
{
	int ret;
	u32 free_win;
	struct dw_pcie *pci = &rockchip->pci;
	enum dw_pcie_as_type as_type;

	free_win = find_first_zero_bit(rockchip->ib_window_map,
				       rockchip->num_ib_windows);
	if (free_win >= rockchip->num_ib_windows) {
		dev_err(pci->dev, "No free inbound window\n");
		return -EINVAL;
	}

	as_type = DW_PCIE_AS_MEM;

	ret = dw_pcie_prog_inbound_atu(pci, 0, free_win, bar, cpu_addr, as_type);
	if (ret < 0) {
		dev_err(pci->dev, "Failed to program IB window\n");
		return ret;
	}

	rockchip->bar_to_atu[bar] = free_win;
	set_bit(free_win, rockchip->ib_window_map);

	return 0;
}

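/*
 * Switch the controller to the enhanced LTSSM-enable control mode and enable
 * the app_dly2 delay used on hot reset; the delay is completed from
 * rockchip_pcie_hot_rst_work().
 */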
static void rockchip_pcie_fast_link_setup(struct rockchip_pcie *rockchip)
{
	u32 val;

	/* LTSSM EN ctrl mode */
	val = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_HOT_RESET_CTRL);
	val |= (PCIE_LTSSM_ENABLE_ENHANCE | PCIE_LTSSM_APP_DLY2_EN) |
	       ((PCIE_LTSSM_ENABLE_ENHANCE | PCIE_LTSSM_APP_DLY2_EN) << 16);
	rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
}

static u8 rockchip_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}

static void rockchip_pcie_local_elbi_enable(struct rockchip_pcie *rockchip)
{
	int i;
	u32 elbi_reg;
	struct dw_pcie *pci = &rockchip->pci;

	for (i = 0; i < PCIE_ELBI_REG_NUM; i++) {
		elbi_reg = PCIE_ELBI_LOCAL_BASE + PCIE_ELBI_LOCAL_ENABLE_OFF +
			   i * 4;
		dw_pcie_writel_dbi(pci, elbi_reg, 0xffff0000);
	}
}

static void rockchip_pcie_elbi_clear(struct rockchip_pcie *rockchip)
{
	int i;
	u32 elbi_reg;
	struct dw_pcie *pci = &rockchip->pci;
	u32 val;

	for (i = 0; i < PCIE_ELBI_REG_NUM; i++) {
		elbi_reg = PCIE_ELBI_LOCAL_BASE + i * 4;
		val = dw_pcie_readl_dbi(pci, elbi_reg);
		val <<= 16;
		dw_pcie_writel_dbi(pci, elbi_reg, val);
	}
}

static void rockchip_pcie_raise_msi_irq(struct rockchip_pcie *rockchip, u8 interrupt_num)
{
	rockchip_pcie_writel_apb(rockchip, BIT(interrupt_num), PCIE_CLIENT_MSI_GEN_CON);
}

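/*
 * "sys" interrupt handler: dispatch ELBI doorbells from the host (SIGIO to
 * userspace), eDMA write/read completions and aborts, and the hot-reset
 * request flagged in PCIE_CLIENT_INTR_STATUS_MISC.
 */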
static irqreturn_t rockchip_pcie_sys_irq_handler(int irq, void *arg)
{
	struct rockchip_pcie *rockchip = arg;
	struct dw_pcie *pci = &rockchip->pci;
	u32 elbi_reg;
	u32 chn;
	union int_status wr_status, rd_status;
	union int_clear clears;
	u32 reg, mask;
	bool sigio = false;

	/* ELBI helper: only check the valid bits and discard the remaining interrupts */
	elbi_reg = dw_pcie_readl_dbi(pci, PCIE_ELBI_LOCAL_BASE + PCIE_ELBI_APP_ELBI_INT_GEN0);
	if (elbi_reg & PCIE_ELBI_APP_ELBI_INT_GEN0_SIGIO) {
		sigio = true;
		rockchip->obj_info->irq_type_ep = OBJ_IRQ_ELBI;
		rockchip_pcie_elbi_clear(rockchip);
		goto out;
	}

	/* DMA helper */
	mask = dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_MASK);
	wr_status.asdword = dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_STATUS) & (~mask);
	mask = dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_MASK);
	rd_status.asdword = dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_STATUS) & (~mask);

	for (chn = 0; chn < PCIE_DMA_CHANEL_MAX_NUM; chn++) {
		if (wr_status.donesta & BIT(chn)) {
			clears.doneclr = BIT(chn);
			dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET +
					   PCIE_DMA_WR_INT_CLEAR, clears.asdword);
			if (rockchip->dma_obj && rockchip->dma_obj->cb)
				rockchip->dma_obj->cb(rockchip->dma_obj, chn, DMA_TO_BUS);
		}

		if (wr_status.abortsta & BIT(chn)) {
			dev_err(pci->dev, "%s, abort\n", __func__);
			clears.abortclr = BIT(chn);
			dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET +
					   PCIE_DMA_WR_INT_CLEAR, clears.asdword);
		}
	}

	for (chn = 0; chn < PCIE_DMA_CHANEL_MAX_NUM; chn++) {
		if (rd_status.donesta & BIT(chn)) {
			clears.doneclr = BIT(chn);
			dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET +
					   PCIE_DMA_RD_INT_CLEAR, clears.asdword);
			if (rockchip->dma_obj && rockchip->dma_obj->cb)
				rockchip->dma_obj->cb(rockchip->dma_obj, chn, DMA_FROM_BUS);
		}

		if (rd_status.abortsta & BIT(chn)) {
			dev_err(pci->dev, "%s, abort\n", __func__);
			clears.abortclr = BIT(chn);
			dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET +
					   PCIE_DMA_RD_INT_CLEAR, clears.asdword);
		}
	}

	if (wr_status.asdword || rd_status.asdword) {
		rockchip->obj_info->irq_type_rc = OBJ_IRQ_DMA;
		rockchip->obj_info->dma_status_rc.wr |= wr_status.asdword;
		rockchip->obj_info->dma_status_rc.rd |= rd_status.asdword;
		rockchip_pcie_raise_msi_irq(rockchip, PCIe_CLIENT_MSI_OBJ_IRQ);

		rockchip->obj_info->irq_type_ep = OBJ_IRQ_DMA;
		rockchip->obj_info->dma_status_ep.wr |= wr_status.asdword;
		rockchip->obj_info->dma_status_ep.rd |= rd_status.asdword;
		sigio = true;
	}

out:
	if (sigio) {
		dev_dbg(rockchip->pci.dev, "SIGIO\n");
		kill_fasync(&rockchip->async, SIGIO, POLL_IN);
	}

	reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
	if (reg & BIT(2))
		queue_work(rockchip->hot_rst_wq, &rockchip->hot_rst_work);

	rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);

	return IRQ_HANDLED;
}

static int rockchip_pcie_request_sys_irq(struct rockchip_pcie *rockchip,
					 struct platform_device *pdev)
{
	int irq;
	int ret;
	struct device *dev = rockchip->pci.dev;

	irq = platform_get_irq_byname(pdev, "sys");
	if (irq < 0) {
		dev_err(dev, "missing sys IRQ resource\n");
		return -EINVAL;
	}

	ret = devm_request_irq(dev, irq, rockchip_pcie_sys_irq_handler,
			       IRQF_SHARED, "pcie-sys", rockchip);
	if (ret) {
		dev_err(dev, "failed to request PCIe subsystem IRQ\n");
		return ret;
	}

	return 0;
}

static bool rockchip_pcie_udma_enabled(struct rockchip_pcie *rockchip)
{
	struct dw_pcie *pci = &rockchip->pci;

	return dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_CTRL_OFF);
}

static int rockchip_pcie_init_dma_trx(struct rockchip_pcie *rockchip)
{
	struct dw_pcie *pci = &rockchip->pci;

	if (!rockchip_pcie_udma_enabled(rockchip))
		return 0;

	rockchip->dma_obj = pcie_dw_dmatest_register(pci->dev, true);
	if (IS_ERR(rockchip->dma_obj)) {
		dev_err(rockchip->pci.dev, "failed to prepare dmatest\n");
		return -EINVAL;
	}

	/* Enable client write and read interrupt */
	rockchip_pcie_writel_apb(rockchip, 0xc000000, PCIE_CLIENT_INTR_MASK);

	/* Enable core write interrupt */
	dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_MASK, 0x0);
	/* Enable core read interrupt */
	dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_MASK, 0x0);

	return 0;
}

static void rockchip_pcie_start_dma_rd(struct dma_trx_obj *obj, struct dma_table *cur, int ctr_off)
{
	struct rockchip_pcie *rockchip = dev_get_drvdata(obj->dev);
	struct dw_pcie *pci = &rockchip->pci;

	dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_ENB,
			   cur->enb.asdword);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_RD_CTRL_LO,
			   cur->ctx_reg.ctrllo.asdword);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_RD_CTRL_HI,
			   cur->ctx_reg.ctrlhi.asdword);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_RD_XFERSIZE,
			   cur->ctx_reg.xfersize);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_RD_SAR_PTR_LO,
			   cur->ctx_reg.sarptrlo);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_RD_SAR_PTR_HI,
			   cur->ctx_reg.sarptrhi);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_RD_DAR_PTR_LO,
			   cur->ctx_reg.darptrlo);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_RD_DAR_PTR_HI,
			   cur->ctx_reg.darptrhi);
	dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_DOORBELL,
			   cur->start.asdword);
}

static void rockchip_pcie_start_dma_wr(struct dma_trx_obj *obj, struct dma_table *cur, int ctr_off)
{
	struct rockchip_pcie *rockchip = dev_get_drvdata(obj->dev);
	struct dw_pcie *pci = &rockchip->pci;

	dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_ENB,
			   cur->enb.asdword);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_WR_CTRL_LO,
			   cur->ctx_reg.ctrllo.asdword);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_WR_CTRL_HI,
			   cur->ctx_reg.ctrlhi.asdword);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_WR_XFERSIZE,
			   cur->ctx_reg.xfersize);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_WR_SAR_PTR_LO,
			   cur->ctx_reg.sarptrlo);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_WR_SAR_PTR_HI,
			   cur->ctx_reg.sarptrhi);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_WR_DAR_PTR_LO,
			   cur->ctx_reg.darptrlo);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_WR_DAR_PTR_HI,
			   cur->ctx_reg.darptrhi);
	dw_pcie_writel_dbi(pci, ctr_off + PCIE_DMA_WR_WEILO,
			   cur->weilo.asdword);
	dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_DOORBELL,
			   cur->start.asdword);
}

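/*
 * Dispatch one eDMA transfer described by @table to the read or write engine,
 * using the per-channel register block at PCIE_DMA_OFFSET + chn * 0x200.
 */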
static void rockchip_pcie_start_dma_dwc(struct dma_trx_obj *obj, struct dma_table *table)
{
	int dir = table->dir;
	int chn = table->chn;

	int ctr_off = PCIE_DMA_OFFSET + chn * 0x200;

	if (dir == DMA_FROM_BUS)
		rockchip_pcie_start_dma_rd(obj, table, ctr_off);
	else if (dir == DMA_TO_BUS)
		rockchip_pcie_start_dma_wr(obj, table, ctr_off);
}

static void rockchip_pcie_config_dma_dwc(struct dma_table *table)
{
	table->enb.enb = 0x1;
	table->ctx_reg.ctrllo.lie = 0x1;
	table->ctx_reg.ctrllo.rie = 0x0;
	table->ctx_reg.ctrllo.td = 0x1;
	table->ctx_reg.ctrlhi.asdword = 0x0;
	table->ctx_reg.xfersize = table->buf_size;
	if (table->dir == DMA_FROM_BUS) {
		table->ctx_reg.sarptrlo = (u32)(table->bus & 0xffffffff);
		table->ctx_reg.sarptrhi = (u32)(table->bus >> 32);
		table->ctx_reg.darptrlo = (u32)(table->local & 0xffffffff);
		table->ctx_reg.darptrhi = (u32)(table->local >> 32);
	} else if (table->dir == DMA_TO_BUS) {
		table->ctx_reg.sarptrlo = (u32)(table->local & 0xffffffff);
		table->ctx_reg.sarptrhi = (u32)(table->local >> 32);
		table->ctx_reg.darptrlo = (u32)(table->bus & 0xffffffff);
		table->ctx_reg.darptrhi = (u32)(table->bus >> 32);
	}
	table->weilo.weight0 = 0x0;
	table->start.stop = 0x0;
	table->start.chnl = table->chn;
}

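/*
 * Deferred hot-reset handling: when the app_dly2 delay is enabled, wait for
 * the LTSSM to return to 0 (Detect Quiet) and then signal app_dly2_done so
 * the controller can complete the reset.
 */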
static void rockchip_pcie_hot_rst_work(struct work_struct *work)
{
	struct rockchip_pcie *rockchip = container_of(work, struct rockchip_pcie, hot_rst_work);
	u32 status;
	int ret;

	if (rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_HOT_RESET_CTRL) & PCIE_LTSSM_APP_DLY2_EN) {
		ret = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_LTSSM_STATUS,
					 status, ((status & 0x3F) == 0), 100, PCIE_HOTRESET_TMOUT_US);
		if (ret)
			dev_err(rockchip->pci.dev, "wait for detect quiet failed!\n");

		rockchip_pcie_writel_apb(rockchip, (PCIE_LTSSM_APP_DLY2_DONE) | ((PCIE_LTSSM_APP_DLY2_DONE) << 16),
					 PCIE_CLIENT_HOT_RESET_CTRL);
	}
}

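/*
 * Completion check used by the dmatest helper: return 1 when the channel has
 * finished, -1 on abort, 0 while still in progress; the matching status bits
 * are cleared on the way out.
 */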
static int rockchip_pcie_get_dma_status(struct dma_trx_obj *obj, u8 chn, enum dma_dir dir)
{
	struct rockchip_pcie *rockchip = dev_get_drvdata(obj->dev);
	struct dw_pcie *pci = &rockchip->pci;
	union int_status status;
	union int_clear clears;
	int ret = 0;

	dev_dbg(pci->dev, "%s %x %x\n", __func__,
		dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_STATUS),
		dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_STATUS));

	if (dir == DMA_TO_BUS) {
		status.asdword = dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_STATUS);
		if (status.donesta & BIT(chn)) {
			clears.doneclr = BIT(chn);
			dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET +
					   PCIE_DMA_WR_INT_CLEAR, clears.asdword);
			ret = 1;
		}

		if (status.abortsta & BIT(chn)) {
			dev_err(pci->dev, "%s, write abort\n", __func__);
			clears.abortclr = BIT(chn);
			dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET +
					   PCIE_DMA_WR_INT_CLEAR, clears.asdword);
			ret = -1;
		}
	} else {
		status.asdword = dw_pcie_readl_dbi(pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_STATUS);

		if (status.donesta & BIT(chn)) {
			clears.doneclr = BIT(chn);
			dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET +
					   PCIE_DMA_RD_INT_CLEAR, clears.asdword);
			ret = 1;
		}

		if (status.abortsta & BIT(chn)) {
			dev_err(pci->dev, "%s, read abort %x\n", __func__, status.asdword);
			clears.abortclr = BIT(chn);
			dw_pcie_writel_dbi(pci, PCIE_DMA_OFFSET +
					   PCIE_DMA_RD_INT_CLEAR, clears.asdword);
			ret = -1;
		}
	}

	return ret;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	.start_link = rockchip_pcie_start_link,
	.link_up = rockchip_pcie_link_up,
};

static int pcie_ep_fasync(int fd, struct file *file, int mode)
{
	struct rockchip_pcie *rockchip = (struct rockchip_pcie *)file->private_data;

	return fasync_helper(fd, file, mode, &rockchip->async);
}

static int pcie_ep_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct rockchip_pcie_misc_dev *pcie_misc_dev;

	pcie_misc_dev = container_of(miscdev, struct rockchip_pcie_misc_dev, dev);
	file->private_data = pcie_misc_dev->pcie;

	return 0;
}

static int pcie_ep_release(struct inode *inode, struct file *file)
{
	return pcie_ep_fasync(-1, file, 0);
}

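/*
 * Misc-device ioctls (see uapi/linux/rk-pcie-ep.h): read back the ELBI user
 * registers, maintain CPU caches around DMA buffers, mask all DMA interrupts,
 * raise the object MSI towards the RC, report BAR0 information, and select
 * which resource a subsequent mmap() maps.
 */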
static long pcie_ep_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct rockchip_pcie *rockchip = (struct rockchip_pcie *)file->private_data;
	struct pcie_ep_user_data msg;
	struct pcie_ep_dma_cache_cfg cfg;
	void __user *uarg = (void __user *)arg;
	int i, ret;
	enum pcie_ep_mmap_resource mmap_res;

	switch (cmd) {
	case PCIE_DMA_GET_ELBI_DATA:
		for (i = 4; i <= 6; i++)
			msg.elbi_app_user[i - 4] = dw_pcie_readl_dbi(&rockchip->pci,
								     PCIE_ELBI_LOCAL_BASE + i * 4);
		for (i = 8; i <= 15; i++)
			msg.elbi_app_user[i - 5] = dw_pcie_readl_dbi(&rockchip->pci,
								     PCIE_ELBI_LOCAL_BASE + i * 4);

		ret = copy_to_user(uarg, &msg, sizeof(msg));
		if (ret) {
			dev_err(rockchip->pci.dev, "failed to get elbi data\n");
			return -EFAULT;
		}
		break;
	case PCIE_DMA_CACHE_INVALIDE:
		ret = copy_from_user(&cfg, uarg, sizeof(cfg));
		if (ret) {
			dev_err(rockchip->pci.dev, "failed to copy from userspace\n");
			return -EFAULT;
		}
		dma_sync_single_for_cpu(rockchip->pci.dev, cfg.addr, cfg.size, DMA_FROM_DEVICE);
		break;
	case PCIE_DMA_CACHE_FLUSH:
		ret = copy_from_user(&cfg, uarg, sizeof(cfg));
		if (ret) {
			dev_err(rockchip->pci.dev, "failed to copy from userspace\n");
			return -EFAULT;
		}
		dma_sync_single_for_device(rockchip->pci.dev, cfg.addr, cfg.size, DMA_TO_DEVICE);
		break;
	case PCIE_DMA_IRQ_MASK_ALL:
		dw_pcie_writel_dbi(&rockchip->pci, PCIE_DMA_OFFSET + PCIE_DMA_WR_INT_MASK,
				   0xffffffff);
		dw_pcie_writel_dbi(&rockchip->pci, PCIE_DMA_OFFSET + PCIE_DMA_RD_INT_MASK,
				   0xffffffff);
		break;
	case PCIE_DMA_RAISE_MSI_OBJ_IRQ_USER:
		rockchip->obj_info->irq_type_rc = OBJ_IRQ_USER;
		rockchip_pcie_raise_msi_irq(rockchip, PCIe_CLIENT_MSI_OBJ_IRQ);
		break;
	case PCIE_EP_GET_USER_INFO:
		msg.bar0_phys_addr = rockchip->ib_target_address[0];

		ret = copy_to_user(uarg, &msg, sizeof(msg));
		if (ret) {
			dev_err(rockchip->pci.dev, "failed to copy user info\n");
			return -EFAULT;
		}
		break;
	case PCIE_EP_SET_MMAP_RESOURCE:
		ret = copy_from_user(&mmap_res, uarg, sizeof(mmap_res));
		if (ret) {
			dev_err(rockchip->pci.dev, "failed to copy from userspace\n");
			return -EFAULT;
		}

		if (mmap_res >= PCIE_EP_MMAP_RESOURCE_MAX) {
			dev_err(rockchip->pci.dev, "mmap index %d is out of range\n", mmap_res);
			return -EINVAL;
		}

		rockchip->cur_mmap_res = mmap_res;
		break;
	default:
		break;
	}
	return 0;
}

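/*
 * mmap() the resource previously selected with PCIE_EP_SET_MMAP_RESOURCE:
 * the DBI registers (non-cached), BAR0 (non-cached) or BAR2 (write-combined).
 */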
static int pcie_ep_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct rockchip_pcie *rockchip = (struct rockchip_pcie *)file->private_data;
	size_t size = vma->vm_end - vma->vm_start;
	int err;
	unsigned long addr;

	switch (rockchip->cur_mmap_res) {
	case PCIE_EP_MMAP_RESOURCE_DBI:
		if (size > PCIE_DBI_SIZE) {
			dev_warn(rockchip->pci.dev, "dbi mmap size is out of limitation\n");
			return -EINVAL;
		}
		addr = rockchip->dbi_base_physical;
		break;
	case PCIE_EP_MMAP_RESOURCE_BAR0:
		if (size > rockchip->ib_target_size[0]) {
			dev_warn(rockchip->pci.dev, "bar0 mmap size is out of limitation\n");
			return -EINVAL;
		}
		addr = rockchip->ib_target_address[0];
		break;
	case PCIE_EP_MMAP_RESOURCE_BAR2:
		if (size > rockchip->ib_target_size[2]) {
			dev_warn(rockchip->pci.dev, "bar2 mmap size is out of limitation\n");
			return -EINVAL;
		}
		addr = rockchip->ib_target_address[2];
		break;
	default:
		dev_err(rockchip->pci.dev, "cur mmap_res %d is unsupported\n", rockchip->cur_mmap_res);
		return -EINVAL;
	}

	vma->vm_flags |= VM_IO;
	vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);

	if (rockchip->cur_mmap_res == PCIE_EP_MMAP_RESOURCE_BAR2)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	err = remap_pfn_range(vma, vma->vm_start,
			      __phys_to_pfn(addr),
			      size, vma->vm_page_prot);
	if (err)
		return -EAGAIN;

	return 0;
}

static const struct file_operations pcie_ep_ops = {
	.owner = THIS_MODULE,
	.open = pcie_ep_open,
	.release = pcie_ep_release,
	.unlocked_ioctl = pcie_ep_ioctl,
	.fasync = pcie_ep_fasync,
	.mmap = pcie_ep_mmap,
};

static int rockchip_pcie_add_misc(struct rockchip_pcie *rockchip)
{
	int ret;
	struct rockchip_pcie_misc_dev *pcie_dev;

	pcie_dev = devm_kzalloc(rockchip->pci.dev, sizeof(struct rockchip_pcie_misc_dev),
				GFP_KERNEL);
	if (!pcie_dev)
		return -ENOMEM;

	pcie_dev->dev.minor = MISC_DYNAMIC_MINOR;
	pcie_dev->dev.name = "pcie_ep";
	pcie_dev->dev.fops = &pcie_ep_ops;
	pcie_dev->dev.parent = rockchip->pci.dev;

	ret = misc_register(&pcie_dev->dev);
	if (ret) {
		dev_err(rockchip->pci.dev, "pcie: failed to register misc device.\n");
		return ret;
	}

	pcie_dev->pcie = rockchip;

	dev_info(rockchip->pci.dev, "register misc device pcie_ep\n");

	return 0;
}

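/*
 * Probe: map resources, enable regulator/clocks/PHY, release resets, set up
 * the DBI (lanes, BARs, IDs), start the link and wait for it, program the
 * inbound iATU windows, hook up the eDMA test helper, enable ELBI and "sys"
 * interrupts, and register the pcie_ep misc device.
 */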
static int rockchip_pcie_ep_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rockchip_pcie *rockchip;
	int ret;
	int retry, i;
	u32 reg;

	rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
	if (!rockchip)
		return -ENOMEM;

	platform_set_drvdata(pdev, rockchip);

	rockchip->pci.dev = dev;
	rockchip->pci.ops = &dw_pcie_ops;

	ret = rockchip_pcie_resource_get(pdev, rockchip);
	if (ret)
		return ret;

	/* DON'T MOVE ME: must be enabled before PHY init */
	rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
	if (IS_ERR(rockchip->vpcie3v3)) {
		if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
			return PTR_ERR(rockchip->vpcie3v3);
		dev_info(dev, "no vpcie3v3 regulator found\n");
	}

	if (!IS_ERR(rockchip->vpcie3v3)) {
		ret = regulator_enable(rockchip->vpcie3v3);
		if (ret) {
			dev_err(dev, "fail to enable vpcie3v3 regulator\n");
			return ret;
		}
	}

	ret = rockchip_pcie_clk_init(rockchip);
	if (ret)
		goto disable_regulator;

	if (dw_pcie_link_up(&rockchip->pci)) {
		dev_info(dev, "already linkup\n");
		goto already_linkup;
	} else {
		dev_info(dev, "initial\n");
	}

	ret = rockchip_pcie_phy_init(rockchip);
	if (ret)
		goto deinit_clk;

	ret = rockchip_pcie_reset_control_release(rockchip);
	if (ret)
		goto deinit_phy;

	dw_pcie_setup(&rockchip->pci);

	dw_pcie_dbi_ro_wr_en(&rockchip->pci);
	rockchip_pcie_resize_bar(rockchip);
	rockchip_pcie_init_id(rockchip);
	dw_pcie_dbi_ro_wr_dis(&rockchip->pci);

	rockchip_pcie_fast_link_setup(rockchip);

	rockchip_pcie_start_link(&rockchip->pci);
	rockchip_pcie_devmode_update(rockchip, RKEP_MODE_KERNEL, RKEP_SMODE_LNKRDY);

	rockchip->hot_rst_wq = create_singlethread_workqueue("rkep_hot_rst_wq");
	if (!rockchip->hot_rst_wq) {
		dev_err(dev, "failed to create hot_rst workqueue\n");
		ret = -ENOMEM;
		goto deinit_phy;
	}
	INIT_WORK(&rockchip->hot_rst_work, rockchip_pcie_hot_rst_work);

	reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
	if ((reg & BIT(2)) &&
	    (rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_HOT_RESET_CTRL) & PCIE_LTSSM_APP_DLY2_EN)) {
		rockchip_pcie_writel_apb(rockchip, PCIE_LTSSM_APP_DLY2_DONE | (PCIE_LTSSM_APP_DLY2_DONE << 16),
					 PCIE_CLIENT_HOT_RESET_CTRL);
		dev_info(dev, "hot reset ever\n");
	}
	rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);

	/* Enable client reset or link down interrupt */
	rockchip_pcie_writel_apb(rockchip, 0x40000, PCIE_CLIENT_INTR_MASK);

	for (retry = 0; retry < 10000; retry++) {
		if (dw_pcie_link_up(&rockchip->pci)) {
			/*
			 * We may be here with the link in L0 at Gen1. If the EP is
			 * capable of Gen2 or Gen3, a speed change may still be in
			 * progress, and we would otherwise access the device over an
			 * unstable link. Given that the LTSSM max timeout is 24 ms per
			 * period, wait a bit longer for the speed change to complete.
			 */
			msleep(2000);
			dev_info(dev, "PCIe Link up, LTSSM is 0x%x\n",
				 rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS));
			break;
		}

		dev_info_ratelimited(dev, "PCIe Linking... LTSSM is 0x%x\n",
				     rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS));
		msleep(20);
	}

	if (retry >= 10000) {
		ret = -ENODEV;
		goto deinit_phy;
	}

already_linkup:
	rockchip_pcie_devmode_update(rockchip, RKEP_MODE_KERNEL, RKEP_SMODE_LNKUP);
	rockchip->pci.iatu_unroll_enabled = rockchip_pcie_iatu_unroll_enabled(&rockchip->pci);
	for (i = 0; i < PCIE_BAR_MAX_NUM; i++)
		if (rockchip->ib_target_size[i])
			rockchip_pcie_ep_set_bar(rockchip, i, rockchip->ib_target_address[i]);

	ret = rockchip_pcie_init_dma_trx(rockchip);
	if (ret) {
		dev_err(dev, "failed to add dma extension\n");
		return ret;
	}

	if (rockchip->dma_obj) {
		rockchip->dma_obj->start_dma_func = rockchip_pcie_start_dma_dwc;
		rockchip->dma_obj->config_dma_func = rockchip_pcie_config_dma_dwc;
		rockchip->dma_obj->get_dma_status = rockchip_pcie_get_dma_status;
	}

	/* Enable client ELBI interrupt */
	rockchip_pcie_writel_apb(rockchip, 0x80000000, PCIE_CLIENT_INTR_MASK);
	/* Enable ELBI interrupt */
	rockchip_pcie_local_elbi_enable(rockchip);

	ret = rockchip_pcie_request_sys_irq(rockchip, pdev);
	if (ret)
		goto deinit_phy;

	rockchip_pcie_add_misc(rockchip);

	return 0;

deinit_phy:
	rockchip_pcie_phy_deinit(rockchip);
deinit_clk:
	clk_bulk_disable_unprepare(rockchip->clk_cnt, rockchip->clks);
disable_regulator:
	if (!IS_ERR(rockchip->vpcie3v3))
		regulator_disable(rockchip->vpcie3v3);

	return ret;
}

static struct platform_driver rk_plat_pcie_driver = {
	.driver = {
		.name = "rk-pcie-ep",
		.of_match_table = rockchip_pcie_ep_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = rockchip_pcie_ep_probe,
};

module_platform_driver(rk_plat_pcie_driver);

MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com>");
MODULE_DESCRIPTION("RockChip PCIe Controller EP driver");
MODULE_LICENSE("GPL");