// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		https://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;

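/*
 * MSI handling is built as a two-level hierarchical IRQ domain: a bottom
 * "DWPCI-MSI" domain that talks to the controller's per-vector
 * enable/mask/status registers, and a top PCI/MSI domain whose irq_chip
 * simply forwards ack/mask/unmask to the parent while keeping the PCI
 * device's own MSI mask bits in sync.
 */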
static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip = &dw_pcie_msi_irq_chip,
};

/* MSI interrupt handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	int i, pos, irq;
	unsigned long val;
	u32 status, num_ctrls;
	irqreturn_t ret = IRQ_NONE;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	num_ctrls = DIV_ROUND_UP(pp->num_vectors, MAX_MSI_IRQS_PER_CTRL);

	for (i = 0; i < num_ctrls; i++) {
		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
					   (i * MSI_REG_CTRL_BLOCK_SIZE));
		if (!status)
			continue;

		ret = IRQ_HANDLED;
		val = status;
		pos = 0;
		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			irq = irq_find_mapping(pp->irq_domain,
					       (i * MAX_MSI_IRQS_PER_CTRL) +
					       pos);
			generic_handle_irq(irq);
			pos++;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dw_handle_msi_irq);

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct pcie_port *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

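/*
 * Compose the MSI message: endpoints are pointed at the DMA address of
 * pp->msi_msg (programmed into PCIE_MSI_ADDR_LO/HI by dw_pcie_msi_init())
 * and the payload carries the hardware IRQ number, which is how
 * dw_handle_msi_irq() finds the matching status bit.
 */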
static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

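/*
 * All vectors funnel through the controller's single chained interrupt,
 * so per-vector CPU affinity cannot be honoured here.
 */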
static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

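/*
 * Each MSI controller block serves MAX_MSI_IRQS_PER_CTRL vectors and has
 * its own enable/mask/status registers, laid out MSI_REG_CTRL_BLOCK_SIZE
 * bytes apart. A hwirq therefore splits into a controller index and a bit
 * position; pp->lock serialises updates of the cached mask in
 * pp->irq_mask[] and its write-back to the hardware.
 */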
static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] |= BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	pp->irq_mask[ctrl] &= ~BIT(bit);
	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	unsigned int res, bit, ctrl;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}

static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};

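/*
 * Vector allocation searches pp->msi_irq_in_use for a free, naturally
 * aligned power-of-two region, as Multi-MSI requires: nr_irqs is rounded
 * up via order_base_2(), so a request for e.g. 6 vectors claims a block
 * of 8.
 */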
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    pp->msi_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc = dw_pcie_irq_domain_alloc,
	.free = dw_pcie_irq_domain_free,
};

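/*
 * dw_pcie_allocate_domains() - create the linear IRQ domain covering the
 * controller's vectors and stack the PCI/MSI domain on top of it.
 * Returns 0 on success or -ENOMEM if either domain cannot be created.
 */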
int dw_pcie_allocate_domains(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
						  &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}

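/*
 * Tear down what dw_pcie_host_init() set up for MSI: detach the chained
 * handler, remove both IRQ domains and unmap the msi_msg target address.
 */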
void dw_pcie_free_msi(struct pcie_port *pp)
{
	if (pp->msi_irq) {
		irq_set_chained_handler(pp->msi_irq, NULL);
		irq_set_handler_data(pp->msi_irq, NULL);
	}

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);

	if (pp->msi_data) {
		struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
		struct device *dev = pci->dev;

		dma_unmap_single_attrs(dev, pp->msi_data, sizeof(pp->msi_msg),
				       DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	}
}

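/*
 * Program the MSI target address into the controller. The address is the
 * DMA-mapped location of pp->msi_msg; the memory itself is never read,
 * it only has to be a valid bus address for endpoints to write to, with
 * the controller turning such writes into INTR0 status bits.
 */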
void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target = (u64)pp->msi_data;

	if (!IS_ENABLED(CONFIG_PCI_MSI))
		return;

	/* Program the msi_data */
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
EXPORT_SYMBOL_GPL(dw_pcie_msi_init);

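/*
 * dw_pcie_host_init() - map the config space, set up MSI handling and
 * probe the host bridge. A typical SoC glue driver fills in pp->ops (and
 * optionally pp->msi_irq) from its own probe routine before calling this,
 * roughly as follows (illustrative sketch, names are hypothetical):
 *
 *	pp->ops = &my_soc_pcie_host_ops;
 *	pp->msi_irq = platform_get_irq_byname(pdev, "msi");
 *	ret = dw_pcie_host_init(pp);
 *	if (ret)
 *		return ret;
 */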
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win;
	struct pci_host_bridge *bridge;
	struct resource *cfg_res;
	int ret;

	raw_spin_lock_init(&pp->lock);

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res);
		pp->cfg0_base = cfg_res->start;
	} else if (!pp->va_cfg0_base) {
		dev_err(dev, "Missing *config* reg space\n");
	}

	bridge = devm_pci_alloc_host_bridge(dev, 0);
	if (!bridge)
		return -ENOMEM;

	pp->bridge = bridge;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry(win, &bridge->windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			pp->io_size = resource_size(win->res);
			pp->io_bus_addr = win->res->start - win->offset;
			pp->io_base = pci_pio_to_address(win->res->start);
			break;
		case 0:
			dev_err(dev, "Missing *config* reg space\n");
			pp->cfg0_size = resource_size(win->res);
			pp->cfg0_base = win->res->start;
			if (!pci->dbi_base) {
				pci->dbi_base = devm_pci_remap_cfgspace(dev,
							pp->cfg0_base,
							pp->cfg0_size);
				if (!pci->dbi_base) {
					dev_err(dev, "Error with ioremap\n");
					return -ENOMEM;
				}
			}
			break;
		}
	}

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
					pp->cfg0_base, pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "Error with ioremap in function\n");
			return -ENOMEM;
		}
	}

	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (pci->link_gen < 1)
		pci->link_gen = of_pci_get_max_link_speed(np);

	if (pci_msi_enabled()) {
		/*
		 * If a specific SoC driver needs to change the
		 * default number of vectors, it needs to implement
		 * the set_num_vectors callback.
		 */
		if (!pp->ops->set_num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else {
			pp->ops->set_num_vectors(pp);

			if (pp->num_vectors > MAX_MSI_IRQS ||
			    pp->num_vectors == 0) {
				dev_err(dev, "Invalid number of vectors\n");
				return -EINVAL;
			}
		}

		if (!pp->ops->msi_host_init) {
			pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;

			ret = dw_pcie_allocate_domains(pp);
			if (ret)
				return ret;

			if (pp->msi_irq)
				irq_set_chained_handler_and_data(pp->msi_irq,
							    dw_chained_msi_isr,
							    pp);

			pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg,
						      sizeof(pp->msi_msg),
						      DMA_FROM_DEVICE,
						      DMA_ATTR_SKIP_CPU_SYNC);
			ret = dma_mapping_error(pci->dev, pp->msi_data);
			if (ret) {
				dev_err(pci->dev, "Failed to map MSI data\n");
				pp->msi_data = 0;
				goto err_free_msi;
			}
		} else {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				return ret;
		}
	}

	/* Set default bus ops */
	bridge->ops = &dw_pcie_ops;
	bridge->child_ops = &dw_child_pcie_ops;

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			goto err_free_msi;
	}

	bridge->sysdata = pp;

	ret = pci_host_probe(bridge);
	if (!ret)
		return 0;

err_free_msi:
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
		dw_pcie_free_msi(pp);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);

void dw_pcie_host_deinit(struct pcie_port *pp)
{
	pci_stop_root_bus(pp->bridge->bus);
	pci_remove_root_bus(pp->bridge->bus);
	if (pci_msi_enabled() && !pp->ops->msi_host_init)
		dw_pcie_free_msi(pp);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);

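/*
 * Config accesses to devices below the root bus all share outbound ATU
 * index 0 with the "config" window: map_bus() retargets that region at
 * the requested bus/device/function on every access, using CFG0 for the
 * root port's immediate children and CFG1 for everything further down.
 */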
static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
						unsigned int devfn, int where)
{
	int type;
	u32 busdev;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/*
	 * Checking whether the link is up here is a last line of defense
	 * against platforms that forward errors on the system bus as
	 * SError upon PCI configuration transactions issued when the link
	 * is down. This check is racy by definition and does not stop
	 * the system from triggering an SError if the link goes down
	 * after this check is performed.
	 */
	if (!dw_pcie_link_up(pci))
		return NULL;

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (pci_is_root_bus(bus->parent))
		type = PCIE_ATU_TYPE_CFG0;
	else
		type = PCIE_ATU_TYPE_CFG1;

	dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
				  pp->cfg0_size);

	return pp->va_cfg0_base + where;
}

static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *val)
{
	int ret;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	ret = pci_generic_config_read(bus, devfn, where, size, val);

	if (!ret && (pci->iatu_unroll_enabled & DWC_IATU_IOCFG_SHARED))
		dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	int ret;
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	ret = pci_generic_config_write(bus, devfn, where, size, val);

	if (!ret && (pci->iatu_unroll_enabled & DWC_IATU_IOCFG_SHARED))
		dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static struct pci_ops dw_child_pcie_ops = {
	.map_bus = dw_pcie_other_conf_map_bus,
	.read = dw_pcie_rd_other_conf,
	.write = dw_pcie_wr_other_conf,
};

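/*
 * Accesses on the root bus go straight to the DBI space; the root port is
 * the only device there, so anything other than slot 0 must not respond.
 */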
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
	struct pcie_port *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	if (PCI_SLOT(devfn) > 0)
		return NULL;

	return pci->dbi_base + where;
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);

static struct pci_ops dw_pcie_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

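/*
 * dw_pcie_setup_rc() - program the Root Complex registers: MSI controller
 * enable/mask state, RC BARs, bus numbers, the command register, the
 * outbound ATU windows (unless the platform supplies its own child bus
 * ops) and the bridge class code. DBI read-only registers are unlocked
 * for the duration of the function.
 */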
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val, ctrl, num_ctrls;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/*
	 * Enable DBI read-only registers for writing/updating configuration.
	 * Write permission gets disabled towards the end of this function.
	 */
	dw_pcie_dbi_ro_wr_en(pci);

	dw_pcie_setup(pci);

	if (pci_msi_enabled() && !pp->ops->msi_host_init) {
		num_ctrls = DIV_ROUND_UP(pp->num_vectors, MAX_MSI_IRQS_PER_CTRL);

		/* Initialize IRQ Status array */
		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
			pp->irq_mask[ctrl] = ~0;
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
					   (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					   pp->irq_mask[ctrl]);
			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
					   (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
					   ~0);
		}
	}

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
	       PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides its own child bus config accesses, it means
	 * the platform uses its own address translation component rather than
	 * ATU, so we should not program the ATU here.
	 */
	if (pp->bridge->child_ops == &dw_child_pcie_ops) {
		int atu_idx = 0;
		struct resource_entry *entry;

		/* Get last memory resource entry */
		resource_list_for_each_entry(entry, &pp->bridge->windows) {
			if (resource_type(entry->res) != IORESOURCE_MEM)
				continue;

			if (pci->num_viewport <= ++atu_idx)
				break;

			dw_pcie_prog_outbound_atu(pci, atu_idx,
						  PCIE_ATU_TYPE_MEM, entry->res->start,
						  entry->res->start - entry->offset,
						  resource_size(entry->res));
		}

		if (pp->io_size) {
			if (pci->num_viewport > ++atu_idx)
				dw_pcie_prog_outbound_atu(pci, atu_idx,
							  PCIE_ATU_TYPE_IO, pp->io_base,
							  pp->io_bus_addr, pp->io_size);
			else
				pci->iatu_unroll_enabled |= DWC_IATU_IOCFG_SHARED;
		}

		if (pci->num_viewport <= atu_idx)
			dev_warn(pci->dev, "Resources exceed number of ATU entries (%d)",
				 pci->num_viewport);
	}

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Program correct class for RC */
	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	dw_pcie_dbi_ro_wr_dis(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);