xref: /OK3568_Linux_fs/kernel/drivers/pci/controller/pcie-tango.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun #include <linux/irqchip/chained_irq.h>
3*4882a593Smuzhiyun #include <linux/irqdomain.h>
4*4882a593Smuzhiyun #include <linux/pci-ecam.h>
5*4882a593Smuzhiyun #include <linux/delay.h>
6*4882a593Smuzhiyun #include <linux/msi.h>
7*4882a593Smuzhiyun #include <linux/of_address.h>
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #define MSI_MAX			256
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #define SMP8759_MUX		0x48
12*4882a593Smuzhiyun #define SMP8759_TEST_OUT	0x74
13*4882a593Smuzhiyun #define SMP8759_DOORBELL	0x7c
14*4882a593Smuzhiyun #define SMP8759_STATUS		0x80
15*4882a593Smuzhiyun #define SMP8759_ENABLE		0xa0
16*4882a593Smuzhiyun 
/* Per-controller state for the SMP8759 PCIe host bridge. */
struct tango_pcie {
	DECLARE_BITMAP(used_msi, MSI_MAX);	/* MSI vectors currently allocated */
	u64			msi_doorbell;	/* bus address endpoints write to signal an MSI */
	spinlock_t		used_msi_lock;	/* protects used_msi and the ENABLE registers */
	void __iomem		*base;		/* mapped controller register block */
	struct irq_domain	*dom;		/* inner IRQ domain (hwirq = MSI vector number) */
};
24*4882a593Smuzhiyun 
/*
 * Chained handler for the summary MSI interrupt.
 *
 * The 256 MSI vectors are tracked in 32-bit register banks.  Skip to the
 * next bank that contains at least one allocated vector (set bit in
 * used_msi), read that bank's STATUS register once, and dispatch every
 * pending vector through the inner IRQ domain.  used_msi_lock is held for
 * the whole scan so the allocation bitmap cannot change underneath us.
 */
static void tango_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct tango_pcie *pcie = irq_desc_get_handler_data(desc);
	unsigned long status, base, virq, idx, pos = 0;

	chained_irq_enter(chip, desc);
	spin_lock(&pcie->used_msi_lock);

	while ((pos = find_next_bit(pcie->used_msi, MSI_MAX, pos)) < MSI_MAX) {
		/* Align to the 32-bit bank containing 'pos'; each bank is 4 bytes */
		base = round_down(pos, 32);
		status = readl_relaxed(pcie->base + SMP8759_STATUS + base / 8);
		for_each_set_bit(idx, &status, 32) {
			virq = irq_find_mapping(pcie->dom, base + idx);
			generic_handle_irq(virq);
		}
		/* Continue the scan at the start of the next bank */
		pos = base + 32;
	}

	spin_unlock(&pcie->used_msi_lock);
	chained_irq_exit(chip, desc);
}
47*4882a593Smuzhiyun 
tango_ack(struct irq_data * d)48*4882a593Smuzhiyun static void tango_ack(struct irq_data *d)
49*4882a593Smuzhiyun {
50*4882a593Smuzhiyun 	struct tango_pcie *pcie = d->chip_data;
51*4882a593Smuzhiyun 	u32 offset = (d->hwirq / 32) * 4;
52*4882a593Smuzhiyun 	u32 bit = BIT(d->hwirq % 32);
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun 	writel_relaxed(bit, pcie->base + SMP8759_STATUS + offset);
55*4882a593Smuzhiyun }
56*4882a593Smuzhiyun 
/*
 * Set (unmask) or clear (mask) this vector's bit in its ENABLE register.
 * The read-modify-write is serialized against other mask/unmask calls and
 * the ISR by used_msi_lock.
 */
static void update_msi_enable(struct irq_data *d, bool unmask)
{
	struct tango_pcie *pcie = d->chip_data;
	void __iomem *reg = pcie->base + SMP8759_ENABLE + (d->hwirq / 32) * 4;
	u32 vec_bit = BIT(d->hwirq % 32);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&pcie->used_msi_lock, flags);
	val = readl_relaxed(reg);
	if (unmask)
		val |= vec_bit;
	else
		val &= ~vec_bit;
	writel_relaxed(val, reg);
	spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
}
71*4882a593Smuzhiyun 
tango_mask(struct irq_data * d)72*4882a593Smuzhiyun static void tango_mask(struct irq_data *d)
73*4882a593Smuzhiyun {
74*4882a593Smuzhiyun 	update_msi_enable(d, false);
75*4882a593Smuzhiyun }
76*4882a593Smuzhiyun 
tango_unmask(struct irq_data * d)77*4882a593Smuzhiyun static void tango_unmask(struct irq_data *d)
78*4882a593Smuzhiyun {
79*4882a593Smuzhiyun 	update_msi_enable(d, true);
80*4882a593Smuzhiyun }
81*4882a593Smuzhiyun 
/*
 * All MSIs funnel through one chained parent interrupt, so per-vector
 * affinity cannot be honoured; reject the request.
 */
static int tango_set_affinity(struct irq_data *d, const struct cpumask *mask,
			      bool force)
{
	return -EINVAL;
}
87*4882a593Smuzhiyun 
tango_compose_msi_msg(struct irq_data * d,struct msi_msg * msg)88*4882a593Smuzhiyun static void tango_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
89*4882a593Smuzhiyun {
90*4882a593Smuzhiyun 	struct tango_pcie *pcie = d->chip_data;
91*4882a593Smuzhiyun 	msg->address_lo = lower_32_bits(pcie->msi_doorbell);
92*4882a593Smuzhiyun 	msg->address_hi = upper_32_bits(pcie->msi_doorbell);
93*4882a593Smuzhiyun 	msg->data = d->hwirq;
94*4882a593Smuzhiyun }
95*4882a593Smuzhiyun 
/* irq_chip for the inner domain; one hwirq per MSI vector. */
static struct irq_chip tango_chip = {
	.irq_ack		= tango_ack,
	.irq_mask		= tango_mask,
	.irq_unmask		= tango_unmask,
	.irq_set_affinity	= tango_set_affinity,
	.irq_compose_msi_msg	= tango_compose_msi_msg,
};
103*4882a593Smuzhiyun 
/* MSI-domain .irq_ack: forward the ack to the inner (tango) chip. */
static void msi_ack(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}
108*4882a593Smuzhiyun 
/* Mask at the PCI device (MSI capability) first, then at the controller. */
static void msi_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}
114*4882a593Smuzhiyun 
/* Unmask at the PCI device (MSI capability), then at the controller. */
static void msi_unmask(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
120*4882a593Smuzhiyun 
/* irq_chip exposed to PCI devices through the MSI domain. */
static struct irq_chip msi_chip = {
	.name = "MSI",
	.irq_ack = msi_ack,
	.irq_mask = msi_mask,
	.irq_unmask = msi_unmask,
};
127*4882a593Smuzhiyun 
/*
 * MSI domain description: MSI-X capable, default domain and chip ops
 * fill in anything not overridden by msi_chip.
 */
static struct msi_domain_info msi_dom_info = {
	.flags	= MSI_FLAG_PCI_MSIX
		| MSI_FLAG_USE_DEF_DOM_OPS
		| MSI_FLAG_USE_DEF_CHIP_OPS,
	.chip	= &msi_chip,
};
134*4882a593Smuzhiyun 
tango_irq_domain_alloc(struct irq_domain * dom,unsigned int virq,unsigned int nr_irqs,void * args)135*4882a593Smuzhiyun static int tango_irq_domain_alloc(struct irq_domain *dom, unsigned int virq,
136*4882a593Smuzhiyun 				  unsigned int nr_irqs, void *args)
137*4882a593Smuzhiyun {
138*4882a593Smuzhiyun 	struct tango_pcie *pcie = dom->host_data;
139*4882a593Smuzhiyun 	unsigned long flags;
140*4882a593Smuzhiyun 	int pos;
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun 	spin_lock_irqsave(&pcie->used_msi_lock, flags);
143*4882a593Smuzhiyun 	pos = find_first_zero_bit(pcie->used_msi, MSI_MAX);
144*4882a593Smuzhiyun 	if (pos >= MSI_MAX) {
145*4882a593Smuzhiyun 		spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
146*4882a593Smuzhiyun 		return -ENOSPC;
147*4882a593Smuzhiyun 	}
148*4882a593Smuzhiyun 	__set_bit(pos, pcie->used_msi);
149*4882a593Smuzhiyun 	spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
150*4882a593Smuzhiyun 	irq_domain_set_info(dom, virq, pos, &tango_chip,
151*4882a593Smuzhiyun 			pcie, handle_edge_irq, NULL, NULL);
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun 	return 0;
154*4882a593Smuzhiyun }
155*4882a593Smuzhiyun 
tango_irq_domain_free(struct irq_domain * dom,unsigned int virq,unsigned int nr_irqs)156*4882a593Smuzhiyun static void tango_irq_domain_free(struct irq_domain *dom, unsigned int virq,
157*4882a593Smuzhiyun 				  unsigned int nr_irqs)
158*4882a593Smuzhiyun {
159*4882a593Smuzhiyun 	unsigned long flags;
160*4882a593Smuzhiyun 	struct irq_data *d = irq_domain_get_irq_data(dom, virq);
161*4882a593Smuzhiyun 	struct tango_pcie *pcie = d->chip_data;
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun 	spin_lock_irqsave(&pcie->used_msi_lock, flags);
164*4882a593Smuzhiyun 	__clear_bit(d->hwirq, pcie->used_msi);
165*4882a593Smuzhiyun 	spin_unlock_irqrestore(&pcie->used_msi_lock, flags);
166*4882a593Smuzhiyun }
167*4882a593Smuzhiyun 
/* Ops for the inner IRQ domain backing the MSI domain. */
static const struct irq_domain_ops dom_ops = {
	.alloc	= tango_irq_domain_alloc,
	.free	= tango_irq_domain_free,
};
172*4882a593Smuzhiyun 
/*
 * Config-space read.  Switches the shared bus to config mode around the
 * generic ECAM read, then back to MMIO mode.  There is no way to exclude
 * concurrent MMIO while the mux is flipped (see warning in probe).
 */
static int smp8759_config_read(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pci_config_window *cfg = bus->sysdata;
	struct tango_pcie *pcie = dev_get_drvdata(cfg->parent);
	int ret;

	/* Reads in configuration space outside devfn 0 return garbage */
	if (devfn != 0)
		return PCIBIOS_FUNC_NOT_SUPPORTED;

	/*
	 * PCI config and MMIO accesses are muxed.  Linux doesn't have a
	 * mutual exclusion mechanism for config vs. MMIO accesses, so
	 * concurrent accesses may cause corruption.
	 */
	writel_relaxed(1, pcie->base + SMP8759_MUX);
	ret = pci_generic_config_read(bus, devfn, where, size, val);
	writel_relaxed(0, pcie->base + SMP8759_MUX);

	return ret;
}
195*4882a593Smuzhiyun 
/*
 * Config-space write.  Same mux dance as smp8759_config_read: select
 * config mode, perform the generic ECAM write, restore MMIO mode.
 * NOTE(review): unlike the read path, devfn != 0 is not rejected here —
 * presumably writes are harmless; confirm against the hardware erratum.
 */
static int smp8759_config_write(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 val)
{
	struct pci_config_window *cfg = bus->sysdata;
	struct tango_pcie *pcie = dev_get_drvdata(cfg->parent);
	int ret;

	writel_relaxed(1, pcie->base + SMP8759_MUX);
	ret = pci_generic_config_write(bus, devfn, where, size, val);
	writel_relaxed(0, pcie->base + SMP8759_MUX);

	return ret;
}
209*4882a593Smuzhiyun 
/* ECAM ops: standard mapping, muxed read/write accessors. */
static const struct pci_ecam_ops smp8759_ecam_ops = {
	.bus_shift	= 20,
	.pci_ops	= {
		.map_bus	= pci_ecam_map_bus,
		.read		= smp8759_config_read,
		.write		= smp8759_config_write,
	}
};
218*4882a593Smuzhiyun 
tango_pcie_link_up(struct tango_pcie * pcie)219*4882a593Smuzhiyun static int tango_pcie_link_up(struct tango_pcie *pcie)
220*4882a593Smuzhiyun {
221*4882a593Smuzhiyun 	void __iomem *test_out = pcie->base + SMP8759_TEST_OUT;
222*4882a593Smuzhiyun 	int i;
223*4882a593Smuzhiyun 
224*4882a593Smuzhiyun 	writel_relaxed(16, test_out);
225*4882a593Smuzhiyun 	for (i = 0; i < 10; ++i) {
226*4882a593Smuzhiyun 		u32 ltssm_state = readl_relaxed(test_out) >> 8;
227*4882a593Smuzhiyun 		if ((ltssm_state & 0x1f) == 0xf) /* L0 */
228*4882a593Smuzhiyun 			return 1;
229*4882a593Smuzhiyun 		usleep_range(3000, 4000);
230*4882a593Smuzhiyun 	}
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun 	return 0;
233*4882a593Smuzhiyun }
234*4882a593Smuzhiyun 
/*
 * Probe: map registers, verify link, derive the MSI doorbell bus address
 * from the first dma-range, mask all MSIs, build the IRQ and MSI domains,
 * install the chained handler, then hand off to the generic ECAM host.
 * The init order matters: domains must exist before the chained handler
 * is armed, and the handler before any endpoint can raise an MSI.
 */
static int tango_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tango_pcie *pcie;
	struct resource *res;
	struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
	struct irq_domain *msi_dom, *irq_dom;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	int virq, offset;

	/* The config/MMIO mux race is unfixable in software; taint the kernel */
	dev_warn(dev, "simultaneous PCI config and MMIO accesses may cause data corruption\n");
	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	/* Resource 1 is the controller register block (0 is the ECAM window) */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	pcie->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	platform_set_drvdata(pdev, pcie);

	if (!tango_pcie_link_up(pcie))
		return -ENODEV;

	if (of_pci_dma_range_parser_init(&parser, dev->of_node) < 0)
		return -ENOENT;

	if (of_pci_range_parser_one(&parser, &range) == NULL)
		return -ENOENT;

	/* Doorbell = bus-side address of the DOORBELL register, past the dma-range */
	range.pci_addr += range.size;
	pcie->msi_doorbell = range.pci_addr + res->start + SMP8759_DOORBELL;

	/* Start with every MSI vector masked */
	for (offset = 0; offset < MSI_MAX / 8; offset += 4)
		writel_relaxed(0, pcie->base + SMP8759_ENABLE + offset);

	/* IRQ 1 is the summary MSI interrupt */
	virq = platform_get_irq(pdev, 1);
	if (virq < 0)
		return virq;

	irq_dom = irq_domain_create_linear(fwnode, MSI_MAX, &dom_ops, pcie);
	if (!irq_dom) {
		dev_err(dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi_dom = pci_msi_create_irq_domain(fwnode, &msi_dom_info, irq_dom);
	if (!msi_dom) {
		dev_err(dev, "Failed to create MSI domain\n");
		irq_domain_remove(irq_dom);
		return -ENOMEM;
	}

	pcie->dom = irq_dom;
	spin_lock_init(&pcie->used_msi_lock);
	irq_set_chained_handler_and_data(virq, tango_msi_isr, pcie);

	return pci_host_common_probe(pdev);
}
298*4882a593Smuzhiyun 
/* Device-tree match table; .data supplies the ECAM ops to the common host. */
static const struct of_device_id tango_pcie_ids[] = {
	{
		.compatible = "sigma,smp8759-pcie",
		.data = &smp8759_ecam_ops,
	},
	{ },
};
306*4882a593Smuzhiyun 
/* Built-in only (no module unload); unbinding a live host bridge is unsafe. */
static struct platform_driver tango_pcie_driver = {
	.probe	= tango_pcie_probe,
	.driver	= {
		.name = KBUILD_MODNAME,
		.of_match_table = tango_pcie_ids,
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(tango_pcie_driver);
316*4882a593Smuzhiyun 
/*
 * The root complex advertises the wrong device class.
 * Header Type 1 is for PCI-to-PCI bridges.
 */
static void tango_fixup_class(struct pci_dev *dev)
{
	/* Force the class code so the PCI core treats it as a bridge */
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_class);
327*4882a593Smuzhiyun 
/*
 * The root complex exposes a "fake" BAR, which is used to filter
 * bus-to-system accesses.  Only accesses within the range defined by this
 * BAR are forwarded to the host, others are ignored.
 *
 * By default, the DMA framework expects an identity mapping, and DRAM0 is
 * mapped at 0x80000000.
 */
static void tango_fixup_bar(struct pci_dev *dev)
{
	/* Tell the core not to size/reassign this BAR like a normal one */
	dev->non_compliant_bars = true;
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0x80000000);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_bar);
343