// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#include "pci_bus.h"
#include "pci_iov.h"

/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);

static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);

#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)

unsigned int s390_pci_no_rid;

static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

DEFINE_STATIC_KEY_FALSE(have_mio);

static struct kmem_cache *zdev_fmb_cache;

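/*
 * Look up the zpci_dev for the given function ID and take a reference
 * on it; returns NULL if no matching device is on zpci_list.
 */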
struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	spin_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			zpci_zdev_get(zdev);
			break;
		}
	}
	spin_unlock(&zpci_list_lock);
	return zdev;
}

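/*
 * Release zpci_devs that are still in standby here but have already
 * been reserved on the platform side, as reported by CLP.
 */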
void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_device_reserved(zdev);
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_bus *) bus->sysdata)->domain_nr;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 status;

	WARN_ON_ONCE(iota & 0x3fff);
	fib.pba = base;
	fib.pal = limit;
	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
	return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
}

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;
	return cc ? -EIO : 0;
}

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	atomic64_set(&zdev->allocated_pages, 0);
	atomic64_set(&zdev->mapped_pages, 0);
	atomic64_set(&zdev->unmapped_pages, 0);

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

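/* Read from PCI configuration space via the PCI-load instruction */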
static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = __zpci_load(&data, req, offset);
	if (!rc) {
		data = le64_to_cpu((__force __le64) data);
		data >>= (8 - len) * 8;
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}

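/* Write to PCI configuration space via the PCI-store instruction */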
static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data <<= (8 - len) * 8;
	data = (__force u64) cpu_to_le64(data);
	rc = __zpci_store(data, req, offset);
	return rc;
}

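/* No additional alignment is imposed on PCI resources on s390 */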
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}

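/*
 * Map a physical MIO address into the kernel virtual address space.
 * Without MIO support the bus address is used directly.
 */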
static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot)
{
	unsigned long offset, vaddr;
	struct vm_struct *area;
	phys_addr_t last_addr;

	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *) addr;

	offset = addr & ~PAGE_MASK;
	addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
		free_vm_area(area);
		return NULL;
	}
	return (void __iomem *) ((unsigned long) area->addr + offset);
}

void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
	return __ioremap(addr, size, __pgprot(prot));
}
EXPORT_SYMBOL(ioremap_prot);

void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, PAGE_KERNEL);
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_wc(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, pgprot_writecombine(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap_wc);

void __iomem *ioremap_wt(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, pgprot_writethrough(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap_wt);

void iounmap(volatile void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);

/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}

static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
					 unsigned long offset,
					 unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);

static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
					    unsigned long offset, unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);

void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);

static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}

static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
	iounmap(addr);
}

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		pci_iounmap_mio(pdev, addr);
	else
		pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);

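/* pci_ops for the root bus: config space accesses go through the zpci instructions */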
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);

	return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);

	return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

static void zpci_map_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;

		if (zpci_use_mio(zdev))
			pdev->resource[i].start =
				(resource_size_t __force) zdev->bars[i].mio_wt;
		else
			pdev->resource[i].start = (resource_size_t __force)
				pci_iomap_range_fh(pdev, i, 0, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}

	zpci_iov_map_resources(pdev);
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	if (zpci_use_mio(zdev))
		return;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap_fh(pdev, (void __iomem __force *)
			       pdev->resource[i].start);
	}
}

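/* Allocate a free slot in the global iomap table */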
static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	unsigned long entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
	if (entry == ZPCI_IOMAP_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
}

static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
				    unsigned long size, unsigned long flags)
{
	struct resource *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->name = zdev->res_name;

	if (request_resource(&iomem_resource, r)) {
		kfree(r);
		return NULL;
	}
	return r;
}

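/* Allocate an iomap slot and an iomem resource for each BAR of the function */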
int zpci_setup_bus_resources(struct zpci_dev *zdev,
			     struct list_head *resources)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		if (zpci_use_mio(zdev))
			addr = (unsigned long) zdev->bars[i].mio_wt;
		else
			addr = ZPCI_ADDR(entry);
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
		pci_add_resource(resources, res);
	}

	return 0;
}

static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size || !zdev->bars[i].res)
			continue;

		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		release_resource(zdev->bars[i].res);
		kfree(zdev->bars[i].res);
	}
}

int pcibios_add_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	struct resource *res;
	int i;

	/* The pdev has a reference to the zdev via its bus */
	zpci_zdev_get(zdev);
	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;

	pdev->dev.groups = zpci_attr_groups;
	pdev->dev.dma_ops = &s390_pci_dma_ops;
	zpci_map_resources(pdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}

void pcibios_release_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_unmap_resources(pdev);
	zpci_zdev_put(zdev);
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}

static int __zpci_register_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	if (test_bit(domain, zpci_domain)) {
		spin_unlock(&zpci_domain_lock);
		pr_err("Domain %04x is already assigned\n", domain);
		return -EEXIST;
	}
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

static int __zpci_alloc_domain(void)
{
	int domain;

	spin_lock(&zpci_domain_lock);
	/*
	 * We can always auto allocate domains below ZPCI_NR_DEVICES.
	 * There is either a free domain or we have reached the maximum in
	 * which case we would have bailed earlier.
	 */
	domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

int zpci_alloc_domain(int domain)
{
	if (zpci_unique_uid) {
		if (domain)
			return __zpci_register_domain(domain);
		pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
		update_uid_checking(false);
	}
	return __zpci_alloc_domain();
}

void zpci_free_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}

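/* Enable the function via CLP and set up its DMA address translation */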
int zpci_enable_device(struct zpci_dev *zdev)
{
	int rc;

	if (clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES)) {
		rc = -EIO;
		goto out;
	}

	rc = zpci_dma_init_device(zdev);
	if (rc)
		goto out_dma;

	zdev->state = ZPCI_FN_STATE_ONLINE;
	return 0;

out_dma:
	clp_disable_fh(zdev);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_disable_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	/*
	 * The zPCI function may already be disabled by the platform, this is
	 * detected in clp_disable_fh() which becomes a no-op.
	 */
	return clp_disable_fh(zdev) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(zpci_disable_device);

/**
 * zpci_remove_device - Removes the given zdev from the PCI core
 * @zdev: the zdev to be removed from the PCI core
 * @set_error: if true the device's error state is set to permanent failure
 *
 * Sets a zPCI device to a configured but offline state; the zPCI
 * device is still accessible through its hotplug slot and the zPCI
 * API but is removed from the common code PCI bus, making it
 * no longer available to drivers.
 */
void zpci_remove_device(struct zpci_dev *zdev, bool set_error)
{
	struct zpci_bus *zbus = zdev->zbus;
	struct pci_dev *pdev;

	if (!zdev->zbus->bus)
		return;

	pdev = pci_get_slot(zbus->bus, zdev->devfn);
	if (pdev) {
		if (set_error)
			pdev->error_state = pci_channel_io_perm_failure;
		if (pdev->is_virtfn) {
			zpci_iov_remove_virtfn(pdev, zdev->vfn);
			/* balance pci_get_slot */
			pci_dev_put(pdev);
			return;
		}
		pci_stop_and_remove_bus_device_locked(pdev);
		/* balance pci_get_slot */
		pci_dev_put(pdev);
	}
}

/**
 * zpci_create_device() - Create a new zpci_dev and add it to the zbus
 * @fid: Function ID of the device to be created
 * @fh: Current Function Handle of the device to be created
 * @state: Initial state after creation, either Standby or Configured
 *
 * Creates a new zpci device and adds it to its, possibly newly created, zbus
 * as well as zpci_list.
 *
 * Returns: 0 on success, an error value otherwise
 */
int zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
{
	struct zpci_dev *zdev;
	int rc;

	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return -ENOMEM;

	/* FID and Function Handle are the static/dynamic identifiers */
	zdev->fid = fid;
	zdev->fh = fh;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev);
	if (rc)
		goto error;
	zdev->state = state;

	kref_init(&zdev->kref);
	mutex_init(&zdev->lock);

	rc = zpci_init_iommu(zdev);
	if (rc)
		goto error;

	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
		rc = zpci_enable_device(zdev);
		if (rc)
			goto error_destroy_iommu;
	}

	rc = zpci_bus_device_register(zdev, &pci_root_ops);
	if (rc)
		goto error_disable;

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	return 0;

error_disable:
	if (zdev->state == ZPCI_FN_STATE_ONLINE)
		zpci_disable_device(zdev);
error_destroy_iommu:
	zpci_destroy_iommu(zdev);
error:
	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
	kfree(zdev);
	return rc;
}

bool zpci_is_device_configured(struct zpci_dev *zdev)
{
	enum zpci_state state = zdev->state;

	return state != ZPCI_FN_STATE_RESERVED &&
		state != ZPCI_FN_STATE_STANDBY;
}

/**
 * zpci_device_reserved() - Mark device as reserved
 * @zdev: the zpci_dev that was reserved
 *
 * Handle the case that a given zPCI function was reserved by another system.
 * After a call to this function the zpci_dev can no longer be found via
 * get_zdev_by_fid() but may still be reachable through existing references,
 * though it will no longer be functional.
 */
void zpci_device_reserved(struct zpci_dev *zdev)
{
	if (zdev->has_hp_slot)
		zpci_exit_slot(zdev);
	/*
	 * Remove device from zpci_list as it is going away. This also
	 * makes sure we ignore subsequent zPCI events for this device.
	 */
	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);
	zdev->state = ZPCI_FN_STATE_RESERVED;
	zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
	zpci_zdev_put(zdev);
}

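/*
 * kref release callback: tear down whatever state the function is
 * still in, falling through the states, and free the zpci_dev.
 */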
void zpci_release_device(struct kref *kref)
{
	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);

	if (zdev->zbus->bus)
		zpci_remove_device(zdev, false);

	switch (zdev->state) {
	case ZPCI_FN_STATE_ONLINE:
	case ZPCI_FN_STATE_CONFIGURED:
		zpci_disable_device(zdev);
		fallthrough;
	case ZPCI_FN_STATE_STANDBY:
		if (zdev->has_hp_slot)
			zpci_exit_slot(zdev);
		spin_lock(&zpci_list_lock);
		list_del(&zdev->entry);
		spin_unlock(&zpci_list_lock);
		zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
		fallthrough;
	case ZPCI_FN_STATE_RESERVED:
		zpci_cleanup_bus_resources(zdev);
		zpci_bus_device_unregister(zdev);
		zpci_destroy_iommu(zdev);
		fallthrough;
	default:
		break;
	}
	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree(zdev);
}

int zpci_report_error(struct pci_dev *pdev,
		      struct zpci_report_error_header *report)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);

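/* Set up the FMB cache plus the global iomap table and its allocation bitmap */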
static int zpci_mem_init(void)
{
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	if (static_branch_likely(&have_mio))
		clp_setup_writeback_mio();

	return 0;
error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_bitmap);
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}

static unsigned int s390_pci_probe __initdata = 1;
unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;

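/* Handle the "pci=" early parameters: off, nomio, force_floating and norid */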
char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;
	}
	if (!strcmp(str, "nomio")) {
		S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
		return NULL;
	}
	if (!strcmp(str, "force_floating")) {
		s390_pci_force_floating = 1;
		return NULL;
	}
	if (!strcmp(str, "norid")) {
		s390_pci_no_rid = 1;
		return NULL;
	}
	return str;
}

bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}

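/*
 * Bring up the zPCI subsystem: bail out unless PCI is enabled and the
 * required facilities (69 and 71) are installed, then initialize debug,
 * memory, interrupt and DMA support and scan for devices via CLP.
 */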
static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(69) || !test_facility(71))
		return 0;

	if (MACHINE_HAS_PCI_MIO) {
		static_branch_enable(&have_mio);
		ctl_set_bit(2, 5);
	}

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);