// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/memblock.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>

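/*
 * Core remap helper: page-align the request, reject zero-sized, wrapping
 * or out-of-range regions as well as RAM, then reserve vmalloc space and
 * map it with the requested attributes. The returned cookie includes the
 * sub-page offset of the original physical address.
 */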
static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
				      pgprot_t prot, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * Page align the mapping address and size, taking account of any
	 * offset.
	 */
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	/*
	 * Don't allow wraparound, zero size or outside PHYS_MASK.
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped.
	 */
	if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr))))
		return NULL;

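	/* Reserve a virtual address range in the vmalloc area. */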
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = phys_addr;

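	/* Populate the page tables for the reserved range. */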
	err = ioremap_page_range(addr, addr + size, phys_addr, prot);
	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	return (void __iomem *)(offset + addr);
}

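/*
 * Map a device region with explicit memory attributes. Callers normally
 * reach this via the ioremap()/ioremap_wc() wrappers in <asm/io.h>, which
 * pass the appropriate Device or Normal-NC pgprot.
 */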
void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
{
	return __ioremap_caller(phys_addr, size, prot,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__ioremap);

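/*
 * Tear down a mapping created by __ioremap() or ioremap_cache().
 */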
void iounmap(volatile void __iomem *io_addr)
{
	unsigned long addr = (unsigned long)io_addr & PAGE_MASK;

	/*
	 * We could get an address outside vmalloc range in case
	 * of ioremap_cache() reusing a RAM mapping.
	 */
	if (is_vmalloc_addr((void *)addr))
		vunmap((void *)addr);
}
EXPORT_SYMBOL(iounmap);

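/*
 * Map a region with Normal cacheable attributes. RAM that is already
 * covered by the linear map is returned directly instead of being
 * remapped.
 */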
void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
{
	/* For normal memory we already have a cacheable mapping. */
	if (pfn_valid(__phys_to_pfn(phys_addr)))
		return (void __iomem *)__phys_to_virt(phys_addr);

	return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

/*
 * Must be called after early_fixmap_init().
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}

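/*
 * Hook for memremap(): the linear mapping may only be reused for pfns
 * that memblock has recorded as mappable memory.
 */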
bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
				 unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	return memblock_is_map_memory(pfn);
}