xref: /OK3568_Linux_fs/kernel/arch/s390/pci/pci_insn.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * s390 specific pci instructions
 *
 * Copyright IBM Corp. 2013
 */

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/jump_label.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_debug.h>
#include <asm/pci_io.h>
#include <asm/processor.h>

#define ZPCI_INSN_BUSY_DELAY	1	/* 1 microsecond */

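/*
 * All zPCI instructions below report their outcome as a condition code:
 * cc 0 means success, cc 2 means the function is busy and the operation
 * is retried after a short delay, and any other non-zero cc is treated
 * as an error and logged together with the returned status byte via
 * zpci_err_insn().
 */
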
static inline void zpci_err_insn(u8 cc, u8 status, u64 req, u64 offset)
{
	struct {
		u64 req;
		u64 offset;
		u8 cc;
		u8 status;
	} __packed data = {req, offset, cc, status};

	zpci_err_hex(&data, sizeof(data));
}

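/*
 * Every inline assembly wrapper below uses the same idiom to retrieve
 * the result of an instruction: "ipm" inserts the condition code into a
 * general register and "srl ...,28" shifts it into the two least
 * significant bits, so cc ends up as a value between 0 and 3.  The
 * instructions also return a status byte in one of their register
 * operands, which the wrappers extract with ">> 24 & 0xff".
 */
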
/* Modify PCI Function Controls */
static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
{
	u8 cc;

	asm volatile (
		"	.insn	rxy,0xe300000000d0,%[req],%[fib]\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
		: : "cc");
	*status = req >> 24 & 0xff;
	return cc;
}

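/*
 * Retry MPCIFC for as long as the function reports busy (cc 2).  Unlike
 * the I/O accessors below, this path sleeps (msleep) between attempts
 * rather than spinning, so it is presumably only called from process
 * context.
 */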
u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status)
{
	u8 cc;

	do {
		cc = __mpcifc(req, fib, status);
		if (cc == 2)
			msleep(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		zpci_err_insn(cc, *status, req, 0);

	return cc;
}

/* Refresh PCI Translations */
static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
{
	register u64 __addr asm("2") = addr;
	register u64 __range asm("3") = range;
	u8 cc;

	asm volatile (
		"	.insn	rre,0xb9d30000,%[fn],%[addr]\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=d" (cc), [fn] "+d" (fn)
		: [addr] "d" (__addr), "d" (__range)
		: "cc");
	*status = fn >> 24 & 0xff;
	return cc;
}

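/*
 * A refresh that fails with cc 1 and status 4 or 16 is reported as
 * -ENOMEM rather than -EIO, so that callers can tell a (presumably
 * recoverable) translation resource shortage apart from a hard error.
 */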
int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
{
	u8 cc, status;

	do {
		cc = __rpcit(fn, addr, range, &status);
		if (cc == 2)
			udelay(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		zpci_err_insn(cc, status, addr, range);

	if (cc == 1 && (status == 4 || status == 16))
		return -ENOMEM;

	return (cc) ? -EIO : 0;
}

/* Set Interruption Controls */
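/*
 * The SIC instruction is only usable when facility bit 72 is installed;
 * without it the request fails with -EIO.  The interruption subclass is
 * shifted into the bit position the instruction expects before being
 * passed as the second operand.
 */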
int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
{
	if (!test_facility(72))
		return -EIO;

	asm volatile(
		".insn	rsy,0xeb00000000d1,%[ctl],%[isc],%[iib]\n"
		: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [iib] "Q" (*iib));

	return 0;
}

/* PCI Load */
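/*
 * The I/O wrappers below initialize cc to -ENXIO and cover the
 * instruction with an EX_TABLE entry: if the access faults, execution
 * resumes at the local label 1, the ipm/srl sequence is skipped and the
 * preinitialized -ENXIO is returned to the caller instead of a
 * condition code.
 */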
static inline int ____pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
	register u64 __req asm("2") = req;
	register u64 __offset asm("3") = offset;
	int cc = -ENXIO;
	u64 __data;

	asm volatile (
		"	.insn	rre,0xb9d20000,%[data],%[req]\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [data] "=d" (__data), [req] "+d" (__req)
		:  "d" (__offset)
		: "cc");
	*status = __req >> 24 & 0xff;
	*data = __data;
	return cc;
}

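/*
 * __pcilg() only propagates the loaded value on success, so callers of
 * __zpci_load() never see data from a failed load; ____pcilg() above is
 * the raw variant that always writes back whatever the instruction left
 * in the data register.
 */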
static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
	u64 __data;
	int cc;

	cc = ____pcilg(&__data, req, offset, status);
	if (!cc)
		*data = __data;

	return cc;
}

int __zpci_load(u64 *data, u64 req, u64 offset)
{
	u8 status;
	int cc;

	do {
		cc = __pcilg(data, req, offset, &status);
		if (cc == 2)
			udelay(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		zpci_err_insn(cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_load);

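/*
 * Legacy (non-MIO) accessor: the __iomem cookie indexes the zpci_iomap
 * table, from which the function handle and BAR are taken to build the
 * request word; the offset within the BAR is recovered from the cookie
 * with ZPCI_OFFSET().
 */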
static inline int zpci_load_fh(u64 *data, const volatile void __iomem *addr,
			       unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);

	return __zpci_load(data, req, ZPCI_OFFSET(addr));
}

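/*
 * MIO form of PCI load: the instruction takes the I/O address directly,
 * so no function handle/BAR lookup is needed; the status byte is
 * returned in the length register.
 */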
static inline int __pcilg_mio(u64 *data, u64 ioaddr, u64 len, u8 *status)
{
	register u64 addr asm("2") = ioaddr;
	register u64 r3 asm("3") = len;
	int cc = -ENXIO;
	u64 __data;

	asm volatile (
		"       .insn   rre,0xb9d60000,%[data],%[ioaddr]\n"
		"0:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [data] "=d" (__data), "+d" (r3)
		: [ioaddr] "d" (addr)
		: "cc");
	*status = r3 >> 24 & 0xff;
	*data = __data;
	return cc;
}

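/*
 * Public accessor: a static branch selects at run time between the MIO
 * instruction and the legacy function-handle based path, depending on
 * whether the machine provides MIO support.
 */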
int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_load_fh(data, addr, len);

	cc = __pcilg_mio(data, (__force u64) addr, len, &status);
	if (cc)
		zpci_err_insn(cc, status, 0, (__force u64) addr);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_load);

/* PCI Store */
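/*
 * The store helpers mirror the load path above: a raw PCISTG wrapper, a
 * retrying __zpci_store() for the function-handle form, and an MIO
 * variant selected through the same static branch in zpci_store().
 */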
static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
{
	register u64 __req asm("2") = req;
	register u64 __offset asm("3") = offset;
	int cc = -ENXIO;

	asm volatile (
		"	.insn	rre,0xb9d00000,%[data],%[req]\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [req] "+d" (__req)
		: "d" (__offset), [data] "d" (data)
		: "cc");
	*status = __req >> 24 & 0xff;
	return cc;
}

int __zpci_store(u64 data, u64 req, u64 offset)
{
	u8 status;
	int cc;

	do {
		cc = __pcistg(data, req, offset, &status);
		if (cc == 2)
			udelay(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		zpci_err_insn(cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_store);

static inline int zpci_store_fh(const volatile void __iomem *addr, u64 data,
				unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);

	return __zpci_store(data, req, ZPCI_OFFSET(addr));
}

static inline int __pcistg_mio(u64 data, u64 ioaddr, u64 len, u8 *status)
{
	register u64 addr asm("2") = ioaddr;
	register u64 r3 asm("3") = len;
	int cc = -ENXIO;

	asm volatile (
		"       .insn   rre,0xb9d40000,%[data],%[ioaddr]\n"
		"0:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), "+d" (r3)
		: [data] "d" (data), [ioaddr] "d" (addr)
		: "cc");
	*status = r3 >> 24 & 0xff;
	return cc;
}

int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_store_fh(addr, data, len);

	cc = __pcistg_mio(data, (__force u64) addr, len, &status);
	if (cc)
		zpci_err_insn(cc, status, 0, (__force u64) addr);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_store);

/* PCI Store Block */
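/*
 * Block stores write a whole buffer with a single instruction.  The
 * source data is passed as a memory operand ("Q" constraint); the length
 * is encoded in the request word for the legacy form and passed in a
 * register for the MIO form, and the status byte comes back in that same
 * operand.
 */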
static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
{
	int cc = -ENXIO;

	asm volatile (
		"	.insn	rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [req] "+d" (req)
		: [offset] "d" (offset), [data] "Q" (*data)
		: "cc");
	*status = req >> 24 & 0xff;
	return cc;
}

int __zpci_store_block(const u64 *data, u64 req, u64 offset)
{
	u8 status;
	int cc;

	do {
		cc = __pcistb(data, req, offset, &status);
		if (cc == 2)
			udelay(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		zpci_err_insn(cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_store_block);

static inline int zpci_write_block_fh(volatile void __iomem *dst,
				      const void *src, unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
	u64 offset = ZPCI_OFFSET(dst);

	return __zpci_store_block(src, req, offset);
}

static inline int __pcistb_mio(const u64 *data, u64 ioaddr, u64 len, u8 *status)
{
	int cc = -ENXIO;

	asm volatile (
		"       .insn   rsy,0xeb00000000d4,%[len],%[ioaddr],%[data]\n"
		"0:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [len] "+d" (len)
		: [ioaddr] "d" (ioaddr), [data] "Q" (*data)
		: "cc");
	*status = len >> 24 & 0xff;
	return cc;
}

int zpci_write_block(volatile void __iomem *dst,
		     const void *src, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_write_block_fh(dst, src, len);

	cc = __pcistb_mio(src, (__force u64) dst, len, &status);
	if (cc)
		zpci_err_insn(cc, status, 0, (__force u64) dst);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_write_block);

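/*
 * zpci_barrier() orders PCI stores: with MIO enabled it issues the PCI
 * write barrier instruction via __pciwb_mio(), otherwise it is a no-op,
 * presumably because the legacy store instructions already provide the
 * required ordering.
 */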
static inline void __pciwb_mio(void)
{
	unsigned long unused = 0;

	asm volatile (".insn    rre,0xb9d50000,%[op],%[op]\n"
		      : [op] "+d" (unused));
}

void zpci_barrier(void)
{
	if (static_branch_likely(&have_mio))
		__pciwb_mio();
}
EXPORT_SYMBOL_GPL(zpci_barrier);