1819833afSPeter Tyser /* 2819833afSPeter Tyser * Copyright (C) 1994, 1995 Waldorf GmbH 3*23ff8633SDaniel Schwierzeck * Copyright (C) 1994 - 2000, 06 Ralf Baechle 4819833afSPeter Tyser * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 5*23ff8633SDaniel Schwierzeck * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. 6*23ff8633SDaniel Schwierzeck * Author: Maciej W. Rozycki <macro@mips.com> 7*23ff8633SDaniel Schwierzeck * 8*23ff8633SDaniel Schwierzeck * SPDX-License-Identifier: GPL-2.0 9819833afSPeter Tyser */ 10819833afSPeter Tyser #ifndef _ASM_IO_H 11819833afSPeter Tyser #define _ASM_IO_H 12819833afSPeter Tyser 13*23ff8633SDaniel Schwierzeck #include <linux/compiler.h> 14*23ff8633SDaniel Schwierzeck #include <linux/types.h> 15*23ff8633SDaniel Schwierzeck 16819833afSPeter Tyser #include <asm/addrspace.h> 17819833afSPeter Tyser #include <asm/byteorder.h> 18*23ff8633SDaniel Schwierzeck #include <asm/cpu-features.h> 19*23ff8633SDaniel Schwierzeck #include <asm/pgtable-bits.h> 20*23ff8633SDaniel Schwierzeck #include <asm/processor.h> 21*23ff8633SDaniel Schwierzeck #include <asm/string.h> 22*23ff8633SDaniel Schwierzeck 23*23ff8633SDaniel Schwierzeck #include <ioremap.h> 24*23ff8633SDaniel Schwierzeck #include <mangle-port.h> 25*23ff8633SDaniel Schwierzeck #include <spaces.h> 26819833afSPeter Tyser 27819833afSPeter Tyser /* 28819833afSPeter Tyser * Slowdown I/O port space accesses for antique hardware. 29819833afSPeter Tyser */ 30819833afSPeter Tyser #undef CONF_SLOWDOWN_IO 31819833afSPeter Tyser 32819833afSPeter Tyser /* 33*23ff8633SDaniel Schwierzeck * Raw operations are never swapped in software. OTOH values that raw 34*23ff8633SDaniel Schwierzeck * operations are working on may or may not have been swapped by the bus 35*23ff8633SDaniel Schwierzeck * hardware. An example use would be for flash memory that's used for 36*23ff8633SDaniel Schwierzeck * execute in place. 
37819833afSPeter Tyser */ 38*23ff8633SDaniel Schwierzeck # define __raw_ioswabb(a, x) (x) 39*23ff8633SDaniel Schwierzeck # define __raw_ioswabw(a, x) (x) 40*23ff8633SDaniel Schwierzeck # define __raw_ioswabl(a, x) (x) 41*23ff8633SDaniel Schwierzeck # define __raw_ioswabq(a, x) (x) 42*23ff8633SDaniel Schwierzeck # define ____raw_ioswabq(a, x) (x) 43819833afSPeter Tyser 44*23ff8633SDaniel Schwierzeck /* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */ 45819833afSPeter Tyser 46*23ff8633SDaniel Schwierzeck #define IO_SPACE_LIMIT 0xffff 47819833afSPeter Tyser 48819833afSPeter Tyser /* 49819833afSPeter Tyser * On MIPS I/O ports are memory mapped, so we access them using normal 50819833afSPeter Tyser * load/store instructions. mips_io_port_base is the virtual address to 51819833afSPeter Tyser * which all ports are being mapped. For sake of efficiency some code 52819833afSPeter Tyser * assumes that this is an address that can be loaded with a single lui 53819833afSPeter Tyser * instruction, so the lower 16 bits must be zero. Should be true on 54819833afSPeter Tyser * any sane architecture; generic code does not use this assumption. 55819833afSPeter Tyser */ 56819833afSPeter Tyser extern const unsigned long mips_io_port_base; 57819833afSPeter Tyser 58819833afSPeter Tyser /* 59819833afSPeter Tyser * Gcc will generate code to load the value of mips_io_port_base after each 60819833afSPeter Tyser * function call which may be fairly wasteful in some cases. So we don't 61819833afSPeter Tyser * play quite by the book. We tell gcc mips_io_port_base is a long variable 62819833afSPeter Tyser * which solves the code generation issue. Now we need to violate the 63819833afSPeter Tyser * aliasing rules a little to make initialization possible and finally we 64819833afSPeter Tyser * will need the barrier() to fight side effects of the aliasing chat. 65819833afSPeter Tyser * This trickery will eventually collapse under gcc's optimizer. Oh well. 
66819833afSPeter Tyser */ 67819833afSPeter Tyser static inline void set_io_port_base(unsigned long base) 68819833afSPeter Tyser { 69819833afSPeter Tyser * (unsigned long *) &mips_io_port_base = base; 70*23ff8633SDaniel Schwierzeck barrier(); 71819833afSPeter Tyser } 72819833afSPeter Tyser 73819833afSPeter Tyser /* 74819833afSPeter Tyser * Thanks to James van Artsdalen for a better timing-fix than 75819833afSPeter Tyser * the two short jumps: using outb's to a nonexistent port seems 76819833afSPeter Tyser * to guarantee better timings even on fast machines. 77819833afSPeter Tyser * 78819833afSPeter Tyser * On the other hand, I'd like to be sure of a non-existent port: 79819833afSPeter Tyser * I feel a bit unsafe about using 0x80 (should be safe, though) 80819833afSPeter Tyser * 81819833afSPeter Tyser * Linus 82819833afSPeter Tyser * 83819833afSPeter Tyser */ 84819833afSPeter Tyser 85819833afSPeter Tyser #define __SLOW_DOWN_IO \ 86819833afSPeter Tyser __asm__ __volatile__( \ 87819833afSPeter Tyser "sb\t$0,0x80(%0)" \ 88819833afSPeter Tyser : : "r" (mips_io_port_base)); 89819833afSPeter Tyser 90819833afSPeter Tyser #ifdef CONF_SLOWDOWN_IO 91819833afSPeter Tyser #ifdef REALLY_SLOW_IO 92819833afSPeter Tyser #define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; } 93819833afSPeter Tyser #else 94819833afSPeter Tyser #define SLOW_DOWN_IO __SLOW_DOWN_IO 95819833afSPeter Tyser #endif 96819833afSPeter Tyser #else 97819833afSPeter Tyser #define SLOW_DOWN_IO 98819833afSPeter Tyser #endif 99819833afSPeter Tyser 100819833afSPeter Tyser /* 101*23ff8633SDaniel Schwierzeck * virt_to_phys - map virtual addresses to physical 102*23ff8633SDaniel Schwierzeck * @address: address to remap 103*23ff8633SDaniel Schwierzeck * 104*23ff8633SDaniel Schwierzeck * The returned physical address is the physical (CPU) mapping for 105*23ff8633SDaniel Schwierzeck * the memory address given. 
It is only valid to use this function on 106*23ff8633SDaniel Schwierzeck * addresses directly mapped or allocated via kmalloc. 107*23ff8633SDaniel Schwierzeck * 108*23ff8633SDaniel Schwierzeck * This function does not give bus mappings for DMA transfers. In 109*23ff8633SDaniel Schwierzeck * almost all conceivable cases a device driver should not be using 110*23ff8633SDaniel Schwierzeck * this function 111819833afSPeter Tyser */ 112*23ff8633SDaniel Schwierzeck static inline unsigned long virt_to_phys(volatile const void *address) 113819833afSPeter Tyser { 114*23ff8633SDaniel Schwierzeck unsigned long addr = (unsigned long)address; 115*23ff8633SDaniel Schwierzeck 116*23ff8633SDaniel Schwierzeck /* this corresponds to kernel implementation of __pa() */ 117*23ff8633SDaniel Schwierzeck #ifdef CONFIG_64BIT 118*23ff8633SDaniel Schwierzeck if (addr < CKSEG0) 119*23ff8633SDaniel Schwierzeck return XPHYSADDR(addr); 120*23ff8633SDaniel Schwierzeck 121*23ff8633SDaniel Schwierzeck return CPHYSADDR(addr); 122090854c8SZhi-zhou Zhang #else 123*23ff8633SDaniel Schwierzeck return addr - PAGE_OFFSET + PHYS_OFFSET; 124090854c8SZhi-zhou Zhang #endif 125819833afSPeter Tyser } 126819833afSPeter Tyser 127*23ff8633SDaniel Schwierzeck /* 128*23ff8633SDaniel Schwierzeck * phys_to_virt - map physical address to virtual 129*23ff8633SDaniel Schwierzeck * @address: address to remap 130*23ff8633SDaniel Schwierzeck * 131*23ff8633SDaniel Schwierzeck * The returned virtual address is a current CPU mapping for 132*23ff8633SDaniel Schwierzeck * the memory address given. It is only valid to use this function on 133*23ff8633SDaniel Schwierzeck * addresses that have a kernel mapping 134*23ff8633SDaniel Schwierzeck * 135*23ff8633SDaniel Schwierzeck * This function does not handle bus mappings for DMA transfers. 
In 136*23ff8633SDaniel Schwierzeck * almost all conceivable cases a device driver should not be using 137*23ff8633SDaniel Schwierzeck * this function 138*23ff8633SDaniel Schwierzeck */ 139b11c5d1dSDaniel Schwierzeck static inline void *phys_to_virt(unsigned long address) 140819833afSPeter Tyser { 141*23ff8633SDaniel Schwierzeck return (void *)(address + PAGE_OFFSET - PHYS_OFFSET); 142819833afSPeter Tyser } 143819833afSPeter Tyser 144819833afSPeter Tyser /* 145*23ff8633SDaniel Schwierzeck * ISA I/O bus memory addresses are 1:1 with the physical address. 146819833afSPeter Tyser */ 147*23ff8633SDaniel Schwierzeck static inline unsigned long isa_virt_to_bus(volatile void *address) 148819833afSPeter Tyser { 149*23ff8633SDaniel Schwierzeck return (unsigned long)address - PAGE_OFFSET; 150819833afSPeter Tyser } 151819833afSPeter Tyser 152*23ff8633SDaniel Schwierzeck static inline void *isa_bus_to_virt(unsigned long address) 153819833afSPeter Tyser { 154*23ff8633SDaniel Schwierzeck return (void *)(address + PAGE_OFFSET); 155819833afSPeter Tyser } 156819833afSPeter Tyser 157*23ff8633SDaniel Schwierzeck #define isa_page_to_bus page_to_phys 158819833afSPeter Tyser 159819833afSPeter Tyser /* 160*23ff8633SDaniel Schwierzeck * However PCI ones are not necessarily 1:1 and therefore these interfaces 161*23ff8633SDaniel Schwierzeck * are forbidden in portable PCI drivers. 162819833afSPeter Tyser * 163*23ff8633SDaniel Schwierzeck * Allow them for x86 for legacy drivers, though. 
164819833afSPeter Tyser */ 165*23ff8633SDaniel Schwierzeck #define virt_to_bus virt_to_phys 166*23ff8633SDaniel Schwierzeck #define bus_to_virt phys_to_virt 167819833afSPeter Tyser 168*23ff8633SDaniel Schwierzeck static inline void __iomem *__ioremap_mode(phys_addr_t offset, unsigned long size, 169*23ff8633SDaniel Schwierzeck unsigned long flags) 170819833afSPeter Tyser { 171*23ff8633SDaniel Schwierzeck void __iomem *addr; 172*23ff8633SDaniel Schwierzeck phys_addr_t phys_addr; 173*23ff8633SDaniel Schwierzeck 174*23ff8633SDaniel Schwierzeck addr = plat_ioremap(offset, size, flags); 175*23ff8633SDaniel Schwierzeck if (addr) 176*23ff8633SDaniel Schwierzeck return addr; 177*23ff8633SDaniel Schwierzeck 178*23ff8633SDaniel Schwierzeck phys_addr = fixup_bigphys_addr(offset, size); 179*23ff8633SDaniel Schwierzeck return (void __iomem *)(unsigned long)CKSEG1ADDR(phys_addr); 180819833afSPeter Tyser } 181819833afSPeter Tyser 182819833afSPeter Tyser /* 183*23ff8633SDaniel Schwierzeck * ioremap - map bus memory into CPU space 184*23ff8633SDaniel Schwierzeck * @offset: bus address of the memory 185*23ff8633SDaniel Schwierzeck * @size: size of the resource to map 186*23ff8633SDaniel Schwierzeck * 187*23ff8633SDaniel Schwierzeck * ioremap performs a platform specific sequence of operations to 188*23ff8633SDaniel Schwierzeck * make bus memory CPU accessible via the readb/readw/readl/writeb/ 189*23ff8633SDaniel Schwierzeck * writew/writel functions and the other mmio helpers. The returned 190*23ff8633SDaniel Schwierzeck * address is not guaranteed to be usable directly as a virtual 191*23ff8633SDaniel Schwierzeck * address. 
192819833afSPeter Tyser */ 193*23ff8633SDaniel Schwierzeck #define ioremap(offset, size) \ 194*23ff8633SDaniel Schwierzeck __ioremap_mode((offset), (size), _CACHE_UNCACHED) 195*23ff8633SDaniel Schwierzeck 196*23ff8633SDaniel Schwierzeck /* 197*23ff8633SDaniel Schwierzeck * ioremap_nocache - map bus memory into CPU space 198*23ff8633SDaniel Schwierzeck * @offset: bus address of the memory 199*23ff8633SDaniel Schwierzeck * @size: size of the resource to map 200*23ff8633SDaniel Schwierzeck * 201*23ff8633SDaniel Schwierzeck * ioremap_nocache performs a platform specific sequence of operations to 202*23ff8633SDaniel Schwierzeck * make bus memory CPU accessible via the readb/readw/readl/writeb/ 203*23ff8633SDaniel Schwierzeck * writew/writel functions and the other mmio helpers. The returned 204*23ff8633SDaniel Schwierzeck * address is not guaranteed to be usable directly as a virtual 205*23ff8633SDaniel Schwierzeck * address. 206*23ff8633SDaniel Schwierzeck * 207*23ff8633SDaniel Schwierzeck * This version of ioremap ensures that the memory is marked uncachable 208*23ff8633SDaniel Schwierzeck * on the CPU as well as honouring existing caching rules from things like 209*23ff8633SDaniel Schwierzeck * the PCI bus. Note that there are other caches and buffers on many 210*23ff8633SDaniel Schwierzeck * busses. 
In particular driver authors should read up on PCI writes 211*23ff8633SDaniel Schwierzeck * 212*23ff8633SDaniel Schwierzeck * It's useful if some control registers are in such an area and 213*23ff8633SDaniel Schwierzeck * write combining or read caching is not desirable: 214*23ff8633SDaniel Schwierzeck */ 215*23ff8633SDaniel Schwierzeck #define ioremap_nocache(offset, size) \ 216*23ff8633SDaniel Schwierzeck __ioremap_mode((offset), (size), _CACHE_UNCACHED) 217*23ff8633SDaniel Schwierzeck #define ioremap_uc ioremap_nocache 218*23ff8633SDaniel Schwierzeck 219*23ff8633SDaniel Schwierzeck /* 220*23ff8633SDaniel Schwierzeck * ioremap_cachable - map bus memory into CPU space 221*23ff8633SDaniel Schwierzeck * @offset: bus address of the memory 222*23ff8633SDaniel Schwierzeck * @size: size of the resource to map 223*23ff8633SDaniel Schwierzeck * 224*23ff8633SDaniel Schwierzeck * ioremap_nocache performs a platform specific sequence of operations to 225*23ff8633SDaniel Schwierzeck * make bus memory CPU accessible via the readb/readw/readl/writeb/ 226*23ff8633SDaniel Schwierzeck * writew/writel functions and the other mmio helpers. The returned 227*23ff8633SDaniel Schwierzeck * address is not guaranteed to be usable directly as a virtual 228*23ff8633SDaniel Schwierzeck * address. 229*23ff8633SDaniel Schwierzeck * 230*23ff8633SDaniel Schwierzeck * This version of ioremap ensures that the memory is marked cachable by 231*23ff8633SDaniel Schwierzeck * the CPU. Also enables full write-combining. Useful for some 232*23ff8633SDaniel Schwierzeck * memory-like regions on I/O busses. 233*23ff8633SDaniel Schwierzeck */ 234*23ff8633SDaniel Schwierzeck #define ioremap_cachable(offset, size) \ 235*23ff8633SDaniel Schwierzeck __ioremap_mode((offset), (size), _page_cachable_default) 236*23ff8633SDaniel Schwierzeck 237*23ff8633SDaniel Schwierzeck /* 238*23ff8633SDaniel Schwierzeck * These two are MIPS specific ioremap variant. 
ioremap_cacheable_cow 239*23ff8633SDaniel Schwierzeck * requests a cachable mapping, ioremap_uncached_accelerated requests a 240*23ff8633SDaniel Schwierzeck * mapping using the uncached accelerated mode which isn't supported on 241*23ff8633SDaniel Schwierzeck * all processors. 242*23ff8633SDaniel Schwierzeck */ 243*23ff8633SDaniel Schwierzeck #define ioremap_cacheable_cow(offset, size) \ 244*23ff8633SDaniel Schwierzeck __ioremap_mode((offset), (size), _CACHE_CACHABLE_COW) 245*23ff8633SDaniel Schwierzeck #define ioremap_uncached_accelerated(offset, size) \ 246*23ff8633SDaniel Schwierzeck __ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED) 247*23ff8633SDaniel Schwierzeck 248*23ff8633SDaniel Schwierzeck static inline void iounmap(const volatile void __iomem *addr) 249*23ff8633SDaniel Schwierzeck { 250*23ff8633SDaniel Schwierzeck plat_iounmap(addr); 251*23ff8633SDaniel Schwierzeck } 252*23ff8633SDaniel Schwierzeck 253*23ff8633SDaniel Schwierzeck #ifdef CONFIG_CPU_CAVIUM_OCTEON 254*23ff8633SDaniel Schwierzeck #define war_octeon_io_reorder_wmb() wmb() 255*23ff8633SDaniel Schwierzeck #else 256*23ff8633SDaniel Schwierzeck #define war_octeon_io_reorder_wmb() do { } while (0) 257*23ff8633SDaniel Schwierzeck #endif 258*23ff8633SDaniel Schwierzeck 259*23ff8633SDaniel Schwierzeck #define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \ 260*23ff8633SDaniel Schwierzeck \ 261*23ff8633SDaniel Schwierzeck static inline void pfx##write##bwlq(type val, \ 262*23ff8633SDaniel Schwierzeck volatile void __iomem *mem) \ 263*23ff8633SDaniel Schwierzeck { \ 264*23ff8633SDaniel Schwierzeck volatile type *__mem; \ 265*23ff8633SDaniel Schwierzeck type __val; \ 266*23ff8633SDaniel Schwierzeck \ 267*23ff8633SDaniel Schwierzeck war_octeon_io_reorder_wmb(); \ 268*23ff8633SDaniel Schwierzeck \ 269*23ff8633SDaniel Schwierzeck __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \ 270*23ff8633SDaniel Schwierzeck \ 271*23ff8633SDaniel Schwierzeck __val = pfx##ioswab##bwlq(__mem, val); \ 
272*23ff8633SDaniel Schwierzeck \ 273*23ff8633SDaniel Schwierzeck if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \ 274*23ff8633SDaniel Schwierzeck *__mem = __val; \ 275*23ff8633SDaniel Schwierzeck else if (cpu_has_64bits) { \ 276*23ff8633SDaniel Schwierzeck type __tmp; \ 277*23ff8633SDaniel Schwierzeck \ 278*23ff8633SDaniel Schwierzeck __asm__ __volatile__( \ 279*23ff8633SDaniel Schwierzeck ".set arch=r4000" "\t\t# __writeq""\n\t" \ 280*23ff8633SDaniel Schwierzeck "dsll32 %L0, %L0, 0" "\n\t" \ 281*23ff8633SDaniel Schwierzeck "dsrl32 %L0, %L0, 0" "\n\t" \ 282*23ff8633SDaniel Schwierzeck "dsll32 %M0, %M0, 0" "\n\t" \ 283*23ff8633SDaniel Schwierzeck "or %L0, %L0, %M0" "\n\t" \ 284*23ff8633SDaniel Schwierzeck "sd %L0, %2" "\n\t" \ 285*23ff8633SDaniel Schwierzeck ".set mips0" "\n" \ 286*23ff8633SDaniel Schwierzeck : "=r" (__tmp) \ 287*23ff8633SDaniel Schwierzeck : "0" (__val), "m" (*__mem)); \ 288*23ff8633SDaniel Schwierzeck } else \ 289*23ff8633SDaniel Schwierzeck BUG(); \ 290*23ff8633SDaniel Schwierzeck } \ 291*23ff8633SDaniel Schwierzeck \ 292*23ff8633SDaniel Schwierzeck static inline type pfx##read##bwlq(const volatile void __iomem *mem) \ 293*23ff8633SDaniel Schwierzeck { \ 294*23ff8633SDaniel Schwierzeck volatile type *__mem; \ 295*23ff8633SDaniel Schwierzeck type __val; \ 296*23ff8633SDaniel Schwierzeck \ 297*23ff8633SDaniel Schwierzeck __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \ 298*23ff8633SDaniel Schwierzeck \ 299*23ff8633SDaniel Schwierzeck if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \ 300*23ff8633SDaniel Schwierzeck __val = *__mem; \ 301*23ff8633SDaniel Schwierzeck else if (cpu_has_64bits) { \ 302*23ff8633SDaniel Schwierzeck __asm__ __volatile__( \ 303*23ff8633SDaniel Schwierzeck ".set arch=r4000" "\t\t# __readq" "\n\t" \ 304*23ff8633SDaniel Schwierzeck "ld %L0, %1" "\n\t" \ 305*23ff8633SDaniel Schwierzeck "dsra32 %M0, %L0, 0" "\n\t" \ 306*23ff8633SDaniel Schwierzeck "sll %L0, %L0, 0" "\n\t" \ 
307*23ff8633SDaniel Schwierzeck ".set mips0" "\n" \ 308*23ff8633SDaniel Schwierzeck : "=r" (__val) \ 309*23ff8633SDaniel Schwierzeck : "m" (*__mem)); \ 310*23ff8633SDaniel Schwierzeck } else { \ 311*23ff8633SDaniel Schwierzeck __val = 0; \ 312*23ff8633SDaniel Schwierzeck BUG(); \ 313*23ff8633SDaniel Schwierzeck } \ 314*23ff8633SDaniel Schwierzeck \ 315*23ff8633SDaniel Schwierzeck return pfx##ioswab##bwlq(__mem, __val); \ 316*23ff8633SDaniel Schwierzeck } 317*23ff8633SDaniel Schwierzeck 318*23ff8633SDaniel Schwierzeck #define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \ 319*23ff8633SDaniel Schwierzeck \ 320*23ff8633SDaniel Schwierzeck static inline void pfx##out##bwlq##p(type val, unsigned long port) \ 321*23ff8633SDaniel Schwierzeck { \ 322*23ff8633SDaniel Schwierzeck volatile type *__addr; \ 323*23ff8633SDaniel Schwierzeck type __val; \ 324*23ff8633SDaniel Schwierzeck \ 325*23ff8633SDaniel Schwierzeck war_octeon_io_reorder_wmb(); \ 326*23ff8633SDaniel Schwierzeck \ 327*23ff8633SDaniel Schwierzeck __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \ 328*23ff8633SDaniel Schwierzeck \ 329*23ff8633SDaniel Schwierzeck __val = pfx##ioswab##bwlq(__addr, val); \ 330*23ff8633SDaniel Schwierzeck \ 331*23ff8633SDaniel Schwierzeck /* Really, we want this to be atomic */ \ 332*23ff8633SDaniel Schwierzeck BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ 333*23ff8633SDaniel Schwierzeck \ 334*23ff8633SDaniel Schwierzeck *__addr = __val; \ 335*23ff8633SDaniel Schwierzeck slow; \ 336*23ff8633SDaniel Schwierzeck } \ 337*23ff8633SDaniel Schwierzeck \ 338*23ff8633SDaniel Schwierzeck static inline type pfx##in##bwlq##p(unsigned long port) \ 339*23ff8633SDaniel Schwierzeck { \ 340*23ff8633SDaniel Schwierzeck volatile type *__addr; \ 341*23ff8633SDaniel Schwierzeck type __val; \ 342*23ff8633SDaniel Schwierzeck \ 343*23ff8633SDaniel Schwierzeck __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \ 344*23ff8633SDaniel Schwierzeck \ 345*23ff8633SDaniel 
Schwierzeck BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ 346*23ff8633SDaniel Schwierzeck \ 347*23ff8633SDaniel Schwierzeck __val = *__addr; \ 348*23ff8633SDaniel Schwierzeck slow; \ 349*23ff8633SDaniel Schwierzeck \ 350*23ff8633SDaniel Schwierzeck return pfx##ioswab##bwlq(__addr, __val); \ 351*23ff8633SDaniel Schwierzeck } 352*23ff8633SDaniel Schwierzeck 353*23ff8633SDaniel Schwierzeck #define __BUILD_MEMORY_PFX(bus, bwlq, type) \ 354*23ff8633SDaniel Schwierzeck \ 355*23ff8633SDaniel Schwierzeck __BUILD_MEMORY_SINGLE(bus, bwlq, type, 1) 356*23ff8633SDaniel Schwierzeck 357*23ff8633SDaniel Schwierzeck #define BUILDIO_MEM(bwlq, type) \ 358*23ff8633SDaniel Schwierzeck \ 359*23ff8633SDaniel Schwierzeck __BUILD_MEMORY_PFX(__raw_, bwlq, type) \ 360*23ff8633SDaniel Schwierzeck __BUILD_MEMORY_PFX(, bwlq, type) \ 361*23ff8633SDaniel Schwierzeck __BUILD_MEMORY_PFX(__mem_, bwlq, type) \ 362*23ff8633SDaniel Schwierzeck 363*23ff8633SDaniel Schwierzeck BUILDIO_MEM(b, u8) 364*23ff8633SDaniel Schwierzeck BUILDIO_MEM(w, u16) 365*23ff8633SDaniel Schwierzeck BUILDIO_MEM(l, u32) 366*23ff8633SDaniel Schwierzeck BUILDIO_MEM(q, u64) 367*23ff8633SDaniel Schwierzeck 368*23ff8633SDaniel Schwierzeck #define __BUILD_IOPORT_PFX(bus, bwlq, type) \ 369*23ff8633SDaniel Schwierzeck __BUILD_IOPORT_SINGLE(bus, bwlq, type, ,) \ 370*23ff8633SDaniel Schwierzeck __BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO) 371*23ff8633SDaniel Schwierzeck 372*23ff8633SDaniel Schwierzeck #define BUILDIO_IOPORT(bwlq, type) \ 373*23ff8633SDaniel Schwierzeck __BUILD_IOPORT_PFX(, bwlq, type) \ 374*23ff8633SDaniel Schwierzeck __BUILD_IOPORT_PFX(__mem_, bwlq, type) 375*23ff8633SDaniel Schwierzeck 376*23ff8633SDaniel Schwierzeck BUILDIO_IOPORT(b, u8) 377*23ff8633SDaniel Schwierzeck BUILDIO_IOPORT(w, u16) 378*23ff8633SDaniel Schwierzeck BUILDIO_IOPORT(l, u32) 379*23ff8633SDaniel Schwierzeck #ifdef CONFIG_64BIT 380*23ff8633SDaniel Schwierzeck BUILDIO_IOPORT(q, u64) 381*23ff8633SDaniel Schwierzeck #endif 
382*23ff8633SDaniel Schwierzeck 383*23ff8633SDaniel Schwierzeck #define __BUILDIO(bwlq, type) \ 384*23ff8633SDaniel Schwierzeck \ 385*23ff8633SDaniel Schwierzeck __BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0) 386*23ff8633SDaniel Schwierzeck 387*23ff8633SDaniel Schwierzeck __BUILDIO(q, u64) 388*23ff8633SDaniel Schwierzeck 389*23ff8633SDaniel Schwierzeck #define readb_relaxed readb 390*23ff8633SDaniel Schwierzeck #define readw_relaxed readw 391*23ff8633SDaniel Schwierzeck #define readl_relaxed readl 392*23ff8633SDaniel Schwierzeck #define readq_relaxed readq 393*23ff8633SDaniel Schwierzeck 394*23ff8633SDaniel Schwierzeck #define writeb_relaxed writeb 395*23ff8633SDaniel Schwierzeck #define writew_relaxed writew 396*23ff8633SDaniel Schwierzeck #define writel_relaxed writel 397*23ff8633SDaniel Schwierzeck #define writeq_relaxed writeq 398*23ff8633SDaniel Schwierzeck 399*23ff8633SDaniel Schwierzeck #define readb_be(addr) \ 400*23ff8633SDaniel Schwierzeck __raw_readb((__force unsigned *)(addr)) 401*23ff8633SDaniel Schwierzeck #define readw_be(addr) \ 402*23ff8633SDaniel Schwierzeck be16_to_cpu(__raw_readw((__force unsigned *)(addr))) 403*23ff8633SDaniel Schwierzeck #define readl_be(addr) \ 404*23ff8633SDaniel Schwierzeck be32_to_cpu(__raw_readl((__force unsigned *)(addr))) 405*23ff8633SDaniel Schwierzeck #define readq_be(addr) \ 406*23ff8633SDaniel Schwierzeck be64_to_cpu(__raw_readq((__force unsigned *)(addr))) 407*23ff8633SDaniel Schwierzeck 408*23ff8633SDaniel Schwierzeck #define writeb_be(val, addr) \ 409*23ff8633SDaniel Schwierzeck __raw_writeb((val), (__force unsigned *)(addr)) 410*23ff8633SDaniel Schwierzeck #define writew_be(val, addr) \ 411*23ff8633SDaniel Schwierzeck __raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr)) 412*23ff8633SDaniel Schwierzeck #define writel_be(val, addr) \ 413*23ff8633SDaniel Schwierzeck __raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr)) 414*23ff8633SDaniel Schwierzeck #define writeq_be(val, addr) \ 415*23ff8633SDaniel 
Schwierzeck __raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr)) 416*23ff8633SDaniel Schwierzeck 417*23ff8633SDaniel Schwierzeck /* 418*23ff8633SDaniel Schwierzeck * Some code tests for these symbols 419*23ff8633SDaniel Schwierzeck */ 420*23ff8633SDaniel Schwierzeck #define readq readq 421*23ff8633SDaniel Schwierzeck #define writeq writeq 422*23ff8633SDaniel Schwierzeck 423*23ff8633SDaniel Schwierzeck #define __BUILD_MEMORY_STRING(bwlq, type) \ 424*23ff8633SDaniel Schwierzeck \ 425*23ff8633SDaniel Schwierzeck static inline void writes##bwlq(volatile void __iomem *mem, \ 426*23ff8633SDaniel Schwierzeck const void *addr, unsigned int count) \ 427*23ff8633SDaniel Schwierzeck { \ 428*23ff8633SDaniel Schwierzeck const volatile type *__addr = addr; \ 429*23ff8633SDaniel Schwierzeck \ 430*23ff8633SDaniel Schwierzeck while (count--) { \ 431*23ff8633SDaniel Schwierzeck __mem_write##bwlq(*__addr, mem); \ 432*23ff8633SDaniel Schwierzeck __addr++; \ 433*23ff8633SDaniel Schwierzeck } \ 434*23ff8633SDaniel Schwierzeck } \ 435*23ff8633SDaniel Schwierzeck \ 436*23ff8633SDaniel Schwierzeck static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \ 437*23ff8633SDaniel Schwierzeck unsigned int count) \ 438*23ff8633SDaniel Schwierzeck { \ 439*23ff8633SDaniel Schwierzeck volatile type *__addr = addr; \ 440*23ff8633SDaniel Schwierzeck \ 441*23ff8633SDaniel Schwierzeck while (count--) { \ 442*23ff8633SDaniel Schwierzeck *__addr = __mem_read##bwlq(mem); \ 443*23ff8633SDaniel Schwierzeck __addr++; \ 444*23ff8633SDaniel Schwierzeck } \ 445*23ff8633SDaniel Schwierzeck } 446*23ff8633SDaniel Schwierzeck 447*23ff8633SDaniel Schwierzeck #define __BUILD_IOPORT_STRING(bwlq, type) \ 448*23ff8633SDaniel Schwierzeck \ 449*23ff8633SDaniel Schwierzeck static inline void outs##bwlq(unsigned long port, const void *addr, \ 450*23ff8633SDaniel Schwierzeck unsigned int count) \ 451*23ff8633SDaniel Schwierzeck { \ 452*23ff8633SDaniel Schwierzeck const volatile type *__addr = addr; \ 
453*23ff8633SDaniel Schwierzeck \ 454*23ff8633SDaniel Schwierzeck while (count--) { \ 455*23ff8633SDaniel Schwierzeck __mem_out##bwlq(*__addr, port); \ 456*23ff8633SDaniel Schwierzeck __addr++; \ 457*23ff8633SDaniel Schwierzeck } \ 458*23ff8633SDaniel Schwierzeck } \ 459*23ff8633SDaniel Schwierzeck \ 460*23ff8633SDaniel Schwierzeck static inline void ins##bwlq(unsigned long port, void *addr, \ 461*23ff8633SDaniel Schwierzeck unsigned int count) \ 462*23ff8633SDaniel Schwierzeck { \ 463*23ff8633SDaniel Schwierzeck volatile type *__addr = addr; \ 464*23ff8633SDaniel Schwierzeck \ 465*23ff8633SDaniel Schwierzeck while (count--) { \ 466*23ff8633SDaniel Schwierzeck *__addr = __mem_in##bwlq(port); \ 467*23ff8633SDaniel Schwierzeck __addr++; \ 468*23ff8633SDaniel Schwierzeck } \ 469*23ff8633SDaniel Schwierzeck } 470*23ff8633SDaniel Schwierzeck 471*23ff8633SDaniel Schwierzeck #define BUILDSTRING(bwlq, type) \ 472*23ff8633SDaniel Schwierzeck \ 473*23ff8633SDaniel Schwierzeck __BUILD_MEMORY_STRING(bwlq, type) \ 474*23ff8633SDaniel Schwierzeck __BUILD_IOPORT_STRING(bwlq, type) 475*23ff8633SDaniel Schwierzeck 476*23ff8633SDaniel Schwierzeck BUILDSTRING(b, u8) 477*23ff8633SDaniel Schwierzeck BUILDSTRING(w, u16) 478*23ff8633SDaniel Schwierzeck BUILDSTRING(l, u32) 479*23ff8633SDaniel Schwierzeck #ifdef CONFIG_64BIT 480*23ff8633SDaniel Schwierzeck BUILDSTRING(q, u64) 481*23ff8633SDaniel Schwierzeck #endif 482*23ff8633SDaniel Schwierzeck 483*23ff8633SDaniel Schwierzeck 484*23ff8633SDaniel Schwierzeck #ifdef CONFIG_CPU_CAVIUM_OCTEON 485*23ff8633SDaniel Schwierzeck #define mmiowb() wmb() 486*23ff8633SDaniel Schwierzeck #else 487*23ff8633SDaniel Schwierzeck /* Depends on MIPS II instruction set */ 488*23ff8633SDaniel Schwierzeck #define mmiowb() asm volatile ("sync" ::: "memory") 489*23ff8633SDaniel Schwierzeck #endif 490*23ff8633SDaniel Schwierzeck 491*23ff8633SDaniel Schwierzeck static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count) 
492*23ff8633SDaniel Schwierzeck { 493*23ff8633SDaniel Schwierzeck memset((void __force *)addr, val, count); 494*23ff8633SDaniel Schwierzeck } 495*23ff8633SDaniel Schwierzeck static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count) 496*23ff8633SDaniel Schwierzeck { 497*23ff8633SDaniel Schwierzeck memcpy(dst, (void __force *)src, count); 498*23ff8633SDaniel Schwierzeck } 499*23ff8633SDaniel Schwierzeck static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count) 500*23ff8633SDaniel Schwierzeck { 501*23ff8633SDaniel Schwierzeck memcpy((void __force *)dst, src, count); 502*23ff8633SDaniel Schwierzeck } 503*23ff8633SDaniel Schwierzeck 504*23ff8633SDaniel Schwierzeck /* 505*23ff8633SDaniel Schwierzeck * Read a 32-bit register that requires a 64-bit read cycle on the bus. 506*23ff8633SDaniel Schwierzeck * Avoid interrupt mucking, just adjust the address for 4-byte access. 507*23ff8633SDaniel Schwierzeck * Assume the addresses are 8-byte aligned. 
508*23ff8633SDaniel Schwierzeck */ 509*23ff8633SDaniel Schwierzeck #ifdef __MIPSEB__ 510*23ff8633SDaniel Schwierzeck #define __CSR_32_ADJUST 4 511*23ff8633SDaniel Schwierzeck #else 512*23ff8633SDaniel Schwierzeck #define __CSR_32_ADJUST 0 513*23ff8633SDaniel Schwierzeck #endif 514*23ff8633SDaniel Schwierzeck 515*23ff8633SDaniel Schwierzeck #define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v)) 516*23ff8633SDaniel Schwierzeck #define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST)) 517*23ff8633SDaniel Schwierzeck 518*23ff8633SDaniel Schwierzeck /* 519*23ff8633SDaniel Schwierzeck * U-Boot specific 520*23ff8633SDaniel Schwierzeck */ 521*23ff8633SDaniel Schwierzeck #define sync() mmiowb() 522*23ff8633SDaniel Schwierzeck 523*23ff8633SDaniel Schwierzeck #define MAP_NOCACHE (1) 524819833afSPeter Tyser #define MAP_WRCOMBINE (0) 525819833afSPeter Tyser #define MAP_WRBACK (0) 526819833afSPeter Tyser #define MAP_WRTHROUGH (0) 527819833afSPeter Tyser 528819833afSPeter Tyser static inline void * 529819833afSPeter Tyser map_physmem(phys_addr_t paddr, unsigned long len, unsigned long flags) 530819833afSPeter Tyser { 531*23ff8633SDaniel Schwierzeck if (flags == MAP_NOCACHE) 532*23ff8633SDaniel Schwierzeck return ioremap(paddr, len); 533*23ff8633SDaniel Schwierzeck 534819833afSPeter Tyser return (void *)paddr; 535819833afSPeter Tyser } 536819833afSPeter Tyser 537819833afSPeter Tyser /* 538819833afSPeter Tyser * Take down a mapping set up by map_physmem(). 539819833afSPeter Tyser */ 540819833afSPeter Tyser static inline void unmap_physmem(void *vaddr, unsigned long flags) 541819833afSPeter Tyser { 542819833afSPeter Tyser } 543819833afSPeter Tyser 544819833afSPeter Tyser #endif /* _ASM_IO_H */ 545