xref: /OK3568_Linux_fs/kernel/arch/x86/pci/mmconfig_32.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004 Matthew Wilcox <matthew@wil.cx>
 * Copyright (C) 2004 Intel Corp.
 */

/*
 * mmconfig.c - Low-level direct PCI config space access via MMCONFIG
 */

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <asm/e820/api.h>
#include <asm/pci_x86.h>

/* Assume systems with more busses have correct MCFG */
#define mmcfg_virt_addr ((void __iomem *) fix_to_virt(FIX_PCIE_MCFG))

/* The base address of the last MMCONFIG device accessed */
static u32 mmcfg_last_accessed_device;
static int mmcfg_last_accessed_cpu;

/*
 * Functions for accessing PCI configuration space with MMCONFIG accesses
 */
get_base_addr(unsigned int seg,int bus,unsigned devfn)27*4882a593Smuzhiyun static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
28*4882a593Smuzhiyun {
29*4882a593Smuzhiyun 	struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun 	if (cfg)
32*4882a593Smuzhiyun 		return cfg->address;
33*4882a593Smuzhiyun 	return 0;
34*4882a593Smuzhiyun }
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun /*
37*4882a593Smuzhiyun  * This is always called under pci_config_lock
38*4882a593Smuzhiyun  */
pci_exp_set_dev_base(unsigned int base,int bus,int devfn)39*4882a593Smuzhiyun static void pci_exp_set_dev_base(unsigned int base, int bus, int devfn)
40*4882a593Smuzhiyun {
41*4882a593Smuzhiyun 	u32 dev_base = base | PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12);
42*4882a593Smuzhiyun 	int cpu = smp_processor_id();
43*4882a593Smuzhiyun 	if (dev_base != mmcfg_last_accessed_device ||
44*4882a593Smuzhiyun 	    cpu != mmcfg_last_accessed_cpu) {
45*4882a593Smuzhiyun 		mmcfg_last_accessed_device = dev_base;
46*4882a593Smuzhiyun 		mmcfg_last_accessed_cpu = cpu;
47*4882a593Smuzhiyun 		set_fixmap_nocache(FIX_PCIE_MCFG, dev_base);
48*4882a593Smuzhiyun 	}
49*4882a593Smuzhiyun }
50*4882a593Smuzhiyun 
/*
 * Read len (1, 2 or 4) bytes of config space of seg:bus:devfn at offset
 * reg through the MMCONFIG fixmap window.
 *
 * Returns 0 on success; -EINVAL (with *value set to all-ones, the
 * conventional failed-config-read pattern) when the parameters are out
 * of range or no MMCONFIG region covers the bus.
 */
static int pci_mmcfg_read(unsigned int seg, unsigned int bus,
			  unsigned int devfn, int reg, int len, u32 *value)
{
	unsigned long flags;
	u32 base;

	/* Reject out-of-range bus/devfn/register offsets up front. */
	if ((bus > 255) || (devfn > 255) || (reg > 4095)) {
err:		*value = -1;
		return -EINVAL;
	}

	/* RCU protects the mmconfig region list against concurrent updates. */
	rcu_read_lock();
	base = get_base_addr(seg, bus, devfn);
	if (!base) {
		rcu_read_unlock();
		goto err;
	}

	/*
	 * pci_config_lock serializes use of the single shared fixmap
	 * window (and of the last-accessed cache behind it).
	 */
	raw_spin_lock_irqsave(&pci_config_lock, flags);

	pci_exp_set_dev_base(base, bus, devfn);

	/* Any other len leaves *value untouched and still returns 0. */
	switch (len) {
	case 1:
		*value = mmio_config_readb(mmcfg_virt_addr + reg);
		break;
	case 2:
		*value = mmio_config_readw(mmcfg_virt_addr + reg);
		break;
	case 4:
		*value = mmio_config_readl(mmcfg_virt_addr + reg);
		break;
	}
	raw_spin_unlock_irqrestore(&pci_config_lock, flags);
	rcu_read_unlock();

	return 0;
}
89*4882a593Smuzhiyun 
/*
 * Write len (1, 2 or 4) bytes of value to config space of seg:bus:devfn
 * at offset reg through the MMCONFIG fixmap window.
 *
 * Returns 0 on success; -EINVAL when the parameters are out of range or
 * no MMCONFIG region covers the bus.
 */
static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
			   unsigned int devfn, int reg, int len, u32 value)
{
	unsigned long flags;
	u32 base;

	/* Reject out-of-range bus/devfn/register offsets up front. */
	if ((bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	/* RCU protects the mmconfig region list against concurrent updates. */
	rcu_read_lock();
	base = get_base_addr(seg, bus, devfn);
	if (!base) {
		rcu_read_unlock();
		return -EINVAL;
	}

	/*
	 * pci_config_lock serializes use of the single shared fixmap
	 * window (and of the last-accessed cache behind it).
	 */
	raw_spin_lock_irqsave(&pci_config_lock, flags);

	pci_exp_set_dev_base(base, bus, devfn);

	/* Any other len is silently ignored and still returns 0. */
	switch (len) {
	case 1:
		mmio_config_writeb(mmcfg_virt_addr + reg, value);
		break;
	case 2:
		mmio_config_writew(mmcfg_virt_addr + reg, value);
		break;
	case 4:
		mmio_config_writel(mmcfg_virt_addr + reg, value);
		break;
	}
	raw_spin_unlock_irqrestore(&pci_config_lock, flags);
	rcu_read_unlock();

	return 0;
}
126*4882a593Smuzhiyun 
/* MMCONFIG-based raw accessors; installed as raw_pci_ext_ops at init. */
const struct pci_raw_ops pci_mmcfg = {
	.read =		pci_mmcfg_read,
	.write =	pci_mmcfg_write,
};
131*4882a593Smuzhiyun 
pci_mmcfg_arch_init(void)132*4882a593Smuzhiyun int __init pci_mmcfg_arch_init(void)
133*4882a593Smuzhiyun {
134*4882a593Smuzhiyun 	printk(KERN_INFO "PCI: Using MMCONFIG for extended config space\n");
135*4882a593Smuzhiyun 	raw_pci_ext_ops = &pci_mmcfg;
136*4882a593Smuzhiyun 	return 1;
137*4882a593Smuzhiyun }
138*4882a593Smuzhiyun 
/*
 * Nothing to free: this path maps config space through a fixmap slot
 * programmed per access, so arch_init allocated no resources.
 */
void __init pci_mmcfg_arch_free(void)
{
}
142*4882a593Smuzhiyun 
/*
 * No per-region mapping is required here — the fixmap window is
 * (re)programmed on every access — so this is a successful no-op.
 */
int pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg)
{
	return 0;
}
147*4882a593Smuzhiyun 
/*
 * Region teardown: there is no per-region mapping to undo, but the
 * last-accessed cache may still name a device inside the departing
 * region, which would make the next access skip reprogramming the
 * fixmap.  Clear it under pci_config_lock, which guards that cache.
 */
void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg)
{
	unsigned long flags;

	/* Invalidate the cached mmcfg map entry. */
	raw_spin_lock_irqsave(&pci_config_lock, flags);
	mmcfg_last_accessed_device = 0;
	raw_spin_unlock_irqrestore(&pci_config_lock, flags);
}
157