/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68KNOMMU_IO_H
#define _M68KNOMMU_IO_H

/*
 * Convert a physical memory address into an IO memory address.
 * For us this is trivially a type cast.
 */
#define iomem(a)	((void __iomem *) (a))
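
/*
 * Minimal usage sketch (not part of the original header): iomem() simply
 * casts a physical register address to an __iomem cookie that the access
 * functions below accept. The register address used here is hypothetical.
 *
 *	void __iomem *reg = iomem(0xfc060000);
 */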

/*
 * The non-MMU m68k and ColdFire IO and memory mapped hardware access
 * functions have always worked in CPU native endian. We need to define
 * that behavior here first before we include asm-generic/io.h.
 */
#define __raw_readb(addr) \
	({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
#define __raw_readw(addr) \
	({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
#define __raw_readl(addr) \
	({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })

#define __raw_writeb(b, addr) (void)((*(__force volatile u8 *) (addr)) = (b))
#define __raw_writew(b, addr) (void)((*(__force volatile u16 *) (addr)) = (b))
#define __raw_writel(b, addr) (void)((*(__force volatile u32 *) (addr)) = (b))
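
/*
 * A brief usage note (added here, not in the original file): these raw
 * accessors perform no byte swapping, so a register of a native big-endian
 * on-chip peripheral reads back exactly as the hardware presents it. The
 * register address below is hypothetical.
 *
 *	u32 csr = __raw_readl(iomem(0xfc0a0000));	// native-endian read
 *	__raw_writel(csr | 0x1, iomem(0xfc0a0000));	// native-endian write
 */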

#if defined(CONFIG_COLDFIRE)
/*
 * For ColdFire platforms we may need to do some extra checks for what
 * type of address range we are accessing. Include the ColdFire platform
 * definitions so we can figure out if we need to do something special.
 */
#include <asm/byteorder.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#endif /* CONFIG_COLDFIRE */

#if defined(IOMEMBASE)
/*
 * The ColdFire SoC internal peripherals are mapped into virtual address
 * space using the ACR registers of the cache control unit. This means we
 * are using a 1:1 physical:virtual mapping for them. We can quickly
 * determine if we are accessing an internal peripheral device given the
 * physical or virtual address using the same range check. This check logic
 * applies just the same if there is no MMU but something like a PCI bus
 * is present.
 */
static int __cf_internalio(unsigned long addr)
{
	return (addr >= IOMEMBASE) && (addr <= IOMEMBASE + IOMEMSIZE - 1);
}

static int cf_internalio(const volatile void __iomem *addr)
{
	return __cf_internalio((unsigned long) addr);
}
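
/*
 * Illustrative sketch (not part of the original header), assuming a
 * hypothetical part where IOMEMBASE is 0xfc000000 and IOMEMSIZE is
 * 0x01000000: an address inside that window is reported as an internal
 * peripheral, anything outside it is not.
 *
 *	cf_internalio(iomem(0xfc000100));	// non-zero: internal peripheral
 *	cf_internalio(iomem(0xd0000000));	// zero: external/bus address
 */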

/*
 * We need to treat built-in peripherals and bus based address ranges
 * differently. Local built-in peripherals (and the ColdFire SoC parts
 * have quite a lot of them) are always native endian - which is big
 * endian on m68k/ColdFire. Bus based address ranges, like the PCI bus,
 * are accessed little endian - so we need to byte swap those.
 */
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
	if (cf_internalio(addr))
		return __raw_readw(addr);
	return swab16(__raw_readw(addr));
}

#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
	if (cf_internalio(addr))
		return __raw_readl(addr);
	return swab32(__raw_readl(addr));
}

#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
	if (cf_internalio(addr))
		__raw_writew(value, addr);
	else
		__raw_writew(swab16(value), addr);
}

#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
	if (cf_internalio(addr))
		__raw_writel(value, addr);
	else
		__raw_writel(swab32(value), addr);
}
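
/*
 * A small usage sketch (an addition, not from the original file): with the
 * hypothetical IOMEMBASE window above, a 16-bit read of an on-chip register
 * takes the native big-endian path, while the same call on a PCI window
 * address is byte swapped. Both addresses here are made up for illustration.
 *
 *	u16 a = readw(iomem(0xfc000124));	// internal: __raw_readw(), no swap
 *	u16 b = readw(iomem(0xf0001000));	// external: swab16(__raw_readw())
 */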

#else

#define readb __raw_readb
#define readw __raw_readw
#define readl __raw_readl
#define writeb __raw_writeb
#define writew __raw_writew
#define writel __raw_writel

#endif /* IOMEMBASE */

#if defined(CONFIG_PCI)
/*
 * Support for PCI bus access uses the asm-generic access functions.
 * We need to supply the base address and masks for the normal memory
 * and IO address space mappings.
 */
#define PCI_MEM_PA	0xf0000000		/* Host physical address */
#define PCI_MEM_BA	0xf0000000		/* Bus physical address */
#define PCI_MEM_SIZE	0x08000000		/* 128 MB */
#define PCI_MEM_MASK	(PCI_MEM_SIZE - 1)

#define PCI_IO_PA	0xf8000000		/* Host physical address */
#define PCI_IO_BA	0x00000000		/* Bus physical address */
#define PCI_IO_SIZE	0x00010000		/* 64k */
#define PCI_IO_MASK	(PCI_IO_SIZE - 1)

#define HAVE_ARCH_PIO_SIZE
#define PIO_OFFSET	0
#define PIO_MASK	0xffff
#define PIO_RESERVED	0x10000
#define PCI_IOBASE	((void __iomem *) PCI_IO_PA)
#define PCI_SPACE_LIMIT	PCI_IO_MASK
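
/*
 * Sketch of how these definitions are used (an assumption about the
 * asm-generic plumbing, not text from this file): asm-generic/io.h builds
 * the port I/O helpers on top of PCI_IOBASE, so a port number is treated
 * as an offset into the 64k I/O window at PCI_IO_PA, e.g.:
 *
 *	u8 v = inb(0x3f8);	// effectively a byte read at PCI_IOBASE + 0x3f8
 */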
#endif /* CONFIG_PCI */

#include <asm/kmap.h>
#include <asm/virtconvert.h>

#endif /* _M68KNOMMU_IO_H */