/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Generic I/O port emulation.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_IO_H
#define __ASM_GENERIC_IO_H

#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/string.h> /* for memset() and memcpy() */
#include <linux/types.h>

#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
#endif

#include <asm/mmiowb.h>
#include <asm-generic/pci_iomap.h>

#ifndef __io_br
#define __io_br() barrier()
#endif

/* prevent prefetching of coherent DMA data ahead of a dma-complete */
#ifndef __io_ar
#ifdef rmb
#define __io_ar(v) rmb()
#else
#define __io_ar(v) barrier()
#endif
#endif

/* flush writes to coherent DMA data before possibly triggering a DMA read */
#ifndef __io_bw
#ifdef wmb
#define __io_bw() wmb()
#else
#define __io_bw() barrier()
#endif
#endif

/* serialize device access against a spin_unlock, usually handled there. */
#ifndef __io_aw
#define __io_aw() mmiowb_set_pending()
#endif

#ifndef __io_pbw
#define __io_pbw() __io_bw()
#endif

#ifndef __io_paw
#define __io_paw() __io_aw()
#endif

#ifndef __io_pbr
#define __io_pbr() __io_br()
#endif

#ifndef __io_par
#define __io_par(v) __io_ar(v)
#endif


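/*
 * The hooks above are the building blocks for the accessors below: each
 * readX() expands to __io_br(); __raw_readX(); __io_ar(), and each writeX()
 * to __io_bw(); __raw_writeX(); __io_aw(). An architecture that needs real
 * barriers overrides the hooks before including this header; a minimal
 * sketch (the macro bodies are illustrative assumptions, not taken from any
 * particular architecture):
 *
 *	#define __io_bw()	wmb()	// order prior stores vs. the MMIO write
 *	#define __io_ar(v)	rmb()	// order the MMIO read vs. later loads
 *	#include <asm-generic/io.h>
 *
 * Any hook left undefined keeps the compiler-barrier default above.
 */
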
/*
 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
 *
 * On some architectures memory mapped IO needs to be accessed differently.
 * On the simple architectures, we just read/write the memory location
 * directly.
 */

#ifndef __raw_readb
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *)addr;
}
#endif

#ifndef __raw_readw
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *)addr;
}
#endif

#ifndef __raw_readl
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_readq
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return *(const volatile u64 __force *)addr;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef __raw_writeb
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = value;
}
#endif

#ifndef __raw_writew
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 value, volatile void __iomem *addr)
{
	*(volatile u16 __force *)addr = value;
}
#endif

#ifndef __raw_writel
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 value, volatile void __iomem *addr)
{
	*(volatile u32 __force *)addr = value;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_writeq
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
{
	*(volatile u64 __force *)addr = value;
}
#endif
#endif /* CONFIG_64BIT */

/*
 * {read,write}{b,w,l,q}() access little endian memory and return result in
 * native endianness.
 */
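/*
 * Typical use from a driver, shown only as a sketch (the "regs" mapping and
 * the 0x04 register offset are made-up for illustration):
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	u32 status = readl(regs + 0x04);	// little-endian register
 *	writel(status | BIT(0), regs + 0x04);	// write it back with bit 0 set
 *
 * The __io_*() hooks above make these ordered against DMA and spinlocks on
 * architectures that need it.
 */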

#ifndef readb
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
	u8 val;

	__io_br();
	val = __raw_readb(addr);
	__io_ar(val);
	return val;
}
#endif

#ifndef readw
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
	u16 val;

	__io_br();
	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
	__io_ar(val);
	return val;
}
#endif

#ifndef readl
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
	u32 val;

	__io_br();
	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
	__io_ar(val);
	return val;
}
#endif

#ifdef CONFIG_64BIT
#ifndef readq
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
	u64 val;

	__io_br();
	val = __le64_to_cpu(__raw_readq(addr));
	__io_ar(val);
	return val;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writeb
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
	__io_bw();
	__raw_writeb(value, addr);
	__io_aw();
}
#endif

#ifndef writew
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
	__io_bw();
	__raw_writew((u16 __force)cpu_to_le16(value), addr);
	__io_aw();
}
#endif

#ifndef writel
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
	__io_bw();
	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
	__io_aw();
}
#endif

#ifdef CONFIG_64BIT
#ifndef writeq
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
	__io_bw();
	__raw_writeq(__cpu_to_le64(value), addr);
	__io_aw();
}
#endif
#endif /* CONFIG_64BIT */

/*
 * {read,write}{b,w,l,q}_relaxed() are like the regular versions, but
 * are not guaranteed to provide ordering against spinlocks or memory
 * accesses.
 */
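/*
 * Illustrative only: in a hot polling loop the relaxed form skips the
 * per-access barriers, with one explicit barrier once the flag is observed
 * (the "regs" pointer and the STATUS/DONE/LEN names are assumptions made for
 * the sketch):
 *
 *	while (!(readl_relaxed(regs + STATUS) & DONE))
 *		cpu_relax();
 *	rmb();			// order the flag read before reading the payload
 *	len = readl(regs + LEN);
 */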
#ifndef readb_relaxed
#define readb_relaxed readb_relaxed
static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
	return __raw_readb(addr);
}
#endif

#ifndef readw_relaxed
#define readw_relaxed readw_relaxed
static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
	return __le16_to_cpu(__raw_readw(addr));
}
#endif

#ifndef readl_relaxed
#define readl_relaxed readl_relaxed
static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
	return __le32_to_cpu(__raw_readl(addr));
}
#endif

#if defined(readq) && !defined(readq_relaxed)
#define readq_relaxed readq_relaxed
static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
	return __le64_to_cpu(__raw_readq(addr));
}
#endif

#ifndef writeb_relaxed
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
	__raw_writeb(value, addr);
}
#endif

#ifndef writew_relaxed
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
	__raw_writew(cpu_to_le16(value), addr);
}
#endif

#ifndef writel_relaxed
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
	__raw_writel(__cpu_to_le32(value), addr);
}
#endif

#if defined(writeq) && !defined(writeq_relaxed)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
	__raw_writeq(__cpu_to_le64(value), addr);
}
#endif

/*
 * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
 * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
 */
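/*
 * For example (a sketch; the RX_FIFO offset and the buffer size are
 * assumptions), draining a 32-bit wide RX FIFO into a buffer looks like:
 *
 *	u32 buf[16];
 *
 *	readsl(regs + RX_FIFO, buf, ARRAY_SIZE(buf));
 *
 * Note that the device address is not incremented; only the buffer pointer
 * advances.
 */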
#ifndef readsb
#define readsb readsb
static inline void readsb(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u8 *buf = buffer;

		do {
			u8 x = __raw_readb(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsw
#define readsw readsw
static inline void readsw(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u16 *buf = buffer;

		do {
			u16 x = __raw_readw(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsl
#define readsl readsl
static inline void readsl(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u32 *buf = buffer;

		do {
			u32 x = __raw_readl(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef readsq
#define readsq readsq
static inline void readsq(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u64 *buf = buffer;

		do {
			u64 x = __raw_readq(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writesb
#define writesb writesb
static inline void writesb(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u8 *buf = buffer;

		do {
			__raw_writeb(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesw
#define writesw writesw
static inline void writesw(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u16 *buf = buffer;

		do {
			__raw_writew(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesl
#define writesl writesl
static inline void writesl(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u32 *buf = buffer;

		do {
			__raw_writel(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef writesq
#define writesq writesq
static inline void writesq(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u64 *buf = buffer;

		do {
			__raw_writeq(*buf++, addr);
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */

#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *)0)
#endif

#ifndef IO_SPACE_LIMIT
#define IO_SPACE_LIMIT 0xffff
#endif

/*
 * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
 * implemented on hardware that needs an additional delay for I/O accesses to
 * take effect.
 */
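/*
 * Port numbers here are offsets into the PCI_IOBASE window. As a purely
 * illustrative sketch (the 0x3f8 base is the traditional PC UART, used only
 * as an example, not something this header defines):
 *
 *	outb('A', 0x3f8);		// write one byte to the data port
 *	lsr = inb(0x3f8 + 5);		// read the line status register
 */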

#if !defined(inb) && !defined(_inb)
#define _inb _inb
static inline u8 _inb(unsigned long addr)
{
	u8 val;

	__io_pbr();
	val = __raw_readb(PCI_IOBASE + addr);
	__io_par(val);
	return val;
}
#endif

#if !defined(inw) && !defined(_inw)
#define _inw _inw
static inline u16 _inw(unsigned long addr)
{
	u16 val;

	__io_pbr();
	val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr));
	__io_par(val);
	return val;
}
#endif

#if !defined(inl) && !defined(_inl)
#define _inl _inl
static inline u32 _inl(unsigned long addr)
{
	u32 val;

	__io_pbr();
	val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
	__io_par(val);
	return val;
}
#endif

#if !defined(outb) && !defined(_outb)
#define _outb _outb
static inline void _outb(u8 value, unsigned long addr)
{
	__io_pbw();
	__raw_writeb(value, PCI_IOBASE + addr);
	__io_paw();
}
#endif

#if !defined(outw) && !defined(_outw)
#define _outw _outw
static inline void _outw(u16 value, unsigned long addr)
{
	__io_pbw();
	__raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
	__io_paw();
}
#endif

#if !defined(outl) && !defined(_outl)
#define _outl _outl
static inline void _outl(u32 value, unsigned long addr)
{
	__io_pbw();
	__raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
	__io_paw();
}
#endif

#include <linux/logic_pio.h>

#ifndef inb
#define inb _inb
#endif

#ifndef inw
#define inw _inw
#endif

#ifndef inl
#define inl _inl
#endif

#ifndef outb
#define outb _outb
#endif

#ifndef outw
#define outw _outw
#endif

#ifndef outl
#define outl _outl
#endif

#ifndef inb_p
#define inb_p inb_p
static inline u8 inb_p(unsigned long addr)
{
	return inb(addr);
}
#endif

#ifndef inw_p
#define inw_p inw_p
static inline u16 inw_p(unsigned long addr)
{
	return inw(addr);
}
#endif

#ifndef inl_p
#define inl_p inl_p
static inline u32 inl_p(unsigned long addr)
{
	return inl(addr);
}
#endif

#ifndef outb_p
#define outb_p outb_p
static inline void outb_p(u8 value, unsigned long addr)
{
	outb(value, addr);
}
#endif

#ifndef outw_p
#define outw_p outw_p
static inline void outw_p(u16 value, unsigned long addr)
{
	outw(value, addr);
}
#endif

#ifndef outl_p
#define outl_p outl_p
static inline void outl_p(u32 value, unsigned long addr)
{
	outl(value, addr);
}
#endif

/*
 * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
 * single I/O port multiple times.
 */
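/*
 * e.g. (sketch only; the legacy ATA-style data_port and the count are
 * assumptions):
 *
 *	u16 sector[256];
 *
 *	insw(data_port, sector, ARRAY_SIZE(sector));	// pull one 512-byte block
 */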

#ifndef insb
#define insb insb
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
	readsb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insw
#define insw insw
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
	readsw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insl
#define insl insl
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
	readsl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsb
#define outsb outsb
static inline void outsb(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsw
#define outsw outsw
static inline void outsw(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsl
#define outsl outsl
static inline void outsl(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insb_p
#define insb_p insb_p
static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
{
	insb(addr, buffer, count);
}
#endif

#ifndef insw_p
#define insw_p insw_p
static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
{
	insw(addr, buffer, count);
}
#endif

#ifndef insl_p
#define insl_p insl_p
static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
{
	insl(addr, buffer, count);
}
#endif

#ifndef outsb_p
#define outsb_p outsb_p
static inline void outsb_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsb(addr, buffer, count);
}
#endif

#ifndef outsw_p
#define outsw_p outsw_p
static inline void outsw_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsw(addr, buffer, count);
}
#endif

#ifndef outsl_p
#define outsl_p outsl_p
static inline void outsl_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsl(addr, buffer, count);
}
#endif

#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioread8
#define ioread8 ioread8
static inline u8 ioread8(const volatile void __iomem *addr)
{
	return readb(addr);
}
#endif

#ifndef ioread16
#define ioread16 ioread16
static inline u16 ioread16(const volatile void __iomem *addr)
{
	return readw(addr);
}
#endif

#ifndef ioread32
#define ioread32 ioread32
static inline u32 ioread32(const volatile void __iomem *addr)
{
	return readl(addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64
#define ioread64 ioread64
static inline u64 ioread64(const volatile void __iomem *addr)
{
	return readq(addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8
#define iowrite8 iowrite8
static inline void iowrite8(u8 value, volatile void __iomem *addr)
{
	writeb(value, addr);
}
#endif

#ifndef iowrite16
#define iowrite16 iowrite16
static inline void iowrite16(u16 value, volatile void __iomem *addr)
{
	writew(value, addr);
}
#endif

#ifndef iowrite32
#define iowrite32 iowrite32
static inline void iowrite32(u32 value, volatile void __iomem *addr)
{
	writel(value, addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64
#define iowrite64 iowrite64
static inline void iowrite64(u64 value, volatile void __iomem *addr)
{
	writeq(value, addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread16be
#define ioread16be ioread16be
static inline u16 ioread16be(const volatile void __iomem *addr)
{
	return swab16(readw(addr));
}
#endif

#ifndef ioread32be
#define ioread32be ioread32be
static inline u32 ioread32be(const volatile void __iomem *addr)
{
	return swab32(readl(addr));
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64be
#define ioread64be ioread64be
static inline u64 ioread64be(const volatile void __iomem *addr)
{
	return swab64(readq(addr));
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite16be
#define iowrite16be iowrite16be
static inline void iowrite16be(u16 value, volatile void __iomem *addr)
{
	writew(swab16(value), addr);
}
#endif

#ifndef iowrite32be
#define iowrite32be iowrite32be
static inline void iowrite32be(u32 value, volatile void __iomem *addr)
{
	writel(swab32(value), addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64be
#define iowrite64be iowrite64be
static inline void iowrite64be(u64 value, volatile void __iomem *addr)
{
	writeq(swab64(value), addr);
}
#endif
#endif /* CONFIG_64BIT */
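
/*
 * The *be variants above are for devices whose registers are big-endian
 * regardless of CPU endianness; they simply byte-swap around readX/writeX.
 * A sketch (the CHIP_ID/CHIP_CTRL offsets are assumptions):
 *
 *	u32 id = ioread32be(regs + CHIP_ID);	// swapped on little-endian hosts
 *	iowrite32be(0x1, regs + CHIP_CTRL);
 */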

#ifndef ioread8_rep
#define ioread8_rep ioread8_rep
static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
			       unsigned int count)
{
	readsb(addr, buffer, count);
}
#endif

#ifndef ioread16_rep
#define ioread16_rep ioread16_rep
static inline void ioread16_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsw(addr, buffer, count);
}
#endif

#ifndef ioread32_rep
#define ioread32_rep ioread32_rep
static inline void ioread32_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64_rep
#define ioread64_rep ioread64_rep
static inline void ioread64_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8_rep
#define iowrite8_rep iowrite8_rep
static inline void iowrite8_rep(volatile void __iomem *addr,
				const void *buffer,
				unsigned int count)
{
	writesb(addr, buffer, count);
}
#endif

#ifndef iowrite16_rep
#define iowrite16_rep iowrite16_rep
static inline void iowrite16_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesw(addr, buffer, count);
}
#endif

#ifndef iowrite32_rep
#define iowrite32_rep iowrite32_rep
static inline void iowrite32_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64_rep
#define iowrite64_rep iowrite64_rep
static inline void iowrite64_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */
#endif /* CONFIG_GENERIC_IOMAP */

#ifdef __KERNEL__

#include <linux/vmalloc.h>
#define __io_virt(x) ((void __force *)(x))

/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are pretty trivial.
 */
#ifndef virt_to_phys
#define virt_to_phys virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa((unsigned long)address);
}
#endif

#ifndef phys_to_virt
#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
#endif

/**
 * DOC: ioremap() and ioremap_*() variants
 *
 * Architectures with an MMU are expected to provide ioremap() and iounmap()
 * themselves or rely on GENERIC_IOREMAP. For NOMMU architectures we provide
 * a default no-op implementation that expects the physical addresses used
 * for MMIO to already be marked as uncached and usable as kernel virtual
 * addresses.
 *
 * ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes
 * for specific drivers if the architecture chooses to implement them. If they
 * are not implemented we fall back to plain ioremap.
 */
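/*
 * A representative driver-side sequence, as a sketch only (the resource and
 * the 0x10 offset are assumptions, and error handling is trimmed):
 *
 *	void __iomem *base = ioremap(res->start, resource_size(res));
 *
 *	if (!base)
 *		return -ENOMEM;
 *	writel(1, base + 0x10);
 *	iounmap(base);
 */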
#ifndef CONFIG_MMU
#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
	return (void __iomem *)(unsigned long)offset;
}
#endif

#ifndef iounmap
#define iounmap iounmap
static inline void iounmap(void __iomem *addr)
{
}
#endif
#elif defined(CONFIG_GENERIC_IOREMAP)
#include <linux/pgtable.h>

void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
void iounmap(volatile void __iomem *addr);

static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	/* _PAGE_IOREMAP needs to be supplied by the architecture */
	return ioremap_prot(addr, size, _PAGE_IOREMAP);
}
#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */

#ifndef ioremap_wc
#define ioremap_wc ioremap
#endif

#ifndef ioremap_wt
#define ioremap_wt ioremap
#endif

/*
 * ioremap_uc is special in that we do require an explicit architecture
 * implementation. In general you should not use this function in a driver;
 * use plain ioremap(), which is uncached by default, instead. Similarly,
 * architectures should not implement it unless they have a very good
 * reason.
 */
#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	return NULL;
}
#endif

#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	port &= IO_SPACE_LIMIT;
	return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
#define __pci_ioport_unmap __pci_ioport_unmap
static inline void __pci_ioport_unmap(void __iomem *p)
{
	uintptr_t start = (uintptr_t) PCI_IOBASE;
	uintptr_t addr = (uintptr_t) p;

	if (addr >= start && addr < start + IO_SPACE_LIMIT)
		return;
	iounmap(p);
}
#endif

#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
static inline void ioport_unmap(void __iomem *p)
{
}
#endif
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifndef CONFIG_GENERIC_IOMAP
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);

#ifndef __pci_ioport_unmap
static inline void __pci_ioport_unmap(void __iomem *p) {}
#endif

#ifndef pci_iounmap
#define pci_iounmap pci_iounmap
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
	__pci_ioport_unmap(p);
}
#endif
#endif /* CONFIG_GENERIC_IOMAP */

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#ifndef xlate_dev_kmem_ptr
#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
static inline void *xlate_dev_kmem_ptr(void *addr)
{
	return addr;
}
#endif

#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	return __va(addr);
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

#ifdef CONFIG_VIRT_TO_BUS
#ifndef virt_to_bus
static inline unsigned long virt_to_bus(void *address)
{
	return (unsigned long)address;
}

static inline void *bus_to_virt(unsigned long address)
{
	return (void *)address;
}
#endif
#endif

#ifndef memset_io
#define memset_io memset_io
/**
 * memset_io - Set a range of I/O memory to a constant value
 * @addr: The beginning of the I/O-memory range to set
 * @value: The value to set the memory to
 * @size: The number of bytes to set
 *
 * Set a range of I/O memory to a given value.
 */
static inline void memset_io(volatile void __iomem *addr, int value,
			     size_t size)
{
	memset(__io_virt(addr), value, size);
}
#endif

#ifndef memcpy_fromio
#define memcpy_fromio memcpy_fromio
/**
 * memcpy_fromio - Copy a block of data from I/O memory
 * @buffer: The (RAM) destination for the copy
 * @addr: The (I/O memory) source for the data
 * @size: The number of bytes to copy
 *
 * Copy a block of data from I/O memory.
 */
static inline void memcpy_fromio(void *buffer,
				 const volatile void __iomem *addr,
				 size_t size)
{
	memcpy(buffer, __io_virt(addr), size);
}
#endif

#ifndef memcpy_toio
#define memcpy_toio memcpy_toio
/**
 * memcpy_toio - Copy a block of data into I/O memory
 * @addr: The (I/O memory) destination for the copy
 * @buffer: The (RAM) source for the data
 * @size: The number of bytes to copy
 *
 * Copy a block of data to I/O memory.
 */
static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
			       size_t size)
{
	memcpy(__io_virt(addr), buffer, size);
}
#endif
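
/*
 * e.g. uploading a firmware blob into device SRAM, as a sketch only (the
 * sram_base mapping, the fw pointer and the CTRL/CPU_RUN names are
 * assumptions):
 *
 *	memcpy_toio(sram_base, fw->data, fw->size);
 *	wmb();				// make the copy visible before starting the core
 *	writel(CPU_RUN, regs + CTRL);
 */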

#endif /* __KERNEL__ */

#endif /* __ASM_GENERIC_IO_H */