// xref: /OK3568_Linux_fs/kernel/arch/powerpc/mm/ioremap_64.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
			       pgprot_t prot, void *caller)
{
	phys_addr_t paligned, offset;
	void __iomem *ret;
	int err;

	/* We don't support the 4K PFN hack with ioremap */
	if (pgprot_val(prot) & H_PAGE_4K_PFN)
		return NULL;

	/*
	 * Choose an address to map it to. Once the vmalloc system is running,
	 * we use it. Before that, we map using addresses going up from
	 * ioremap_bot.  vmalloc will use the addresses from IOREMAP_BASE
	 * through ioremap_bot.
	 */
	paligned = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;
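
	/*
	 * Worked example (illustrative, assuming 4K pages; ppc64 kernels
	 * often use 64K): for addr = 0x10001234 and size = 0x100,
	 *   paligned = 0x10001000  (addr rounded down to a page boundary)
	 *   offset   = 0x234       (position of addr within that page)
	 *   size     = PAGE_ALIGN(0x10001334) - 0x10001000 = 0x1000
	 * so a single page covers the request, and the caller gets back
	 * the mapped page base plus the original sub-page offset.
	 */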

	if (size == 0 || paligned == 0)
		return NULL;

	if (slab_is_available())
		return do_ioremap(paligned, offset, size, prot, caller);

	pr_warn("ioremap() called early from %pS. Use early_ioremap() instead\n", caller);

	err = early_ioremap_range(ioremap_bot, paligned, size, prot);
	if (err)
		return NULL;

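	/*
	 * Note: this early allocation is bolted and permanent. ioremap_bot
	 * only ever moves up, and iounmap() below refuses any address that
	 * falls under it, so nothing handed out here is ever unmapped.
	 */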
	ret = (void __iomem *)ioremap_bot + offset;
	ioremap_bot += size;

	return ret;
}

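/*
 * Usage sketch (illustrative, not part of this file): drivers reach
 * __ioremap_caller() through the generic ioremap() wrappers rather than
 * calling it directly. RES_START, RES_LEN and STATUS_REG are hypothetical
 * placeholders for whatever device resource a driver actually owns:
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap(RES_START, RES_LEN);
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + STATUS_REG);
 *	iounmap(regs);
 */
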
/*
 * Unmap an IO region and remove it from the vmalloc'd list.
 * Access to the IO memory must be serialized by the driver.
 */
void iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

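	/*
	 * The token may carry the sub-page offset that __ioremap_caller()
	 * added, so mask it back down to the page base before tearing the
	 * mapping down.
	 */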
	addr = (void *)((unsigned long __force)PCI_FIX_ADDR(token) & PAGE_MASK);

	if ((unsigned long)addr < ioremap_bot) {
		pr_warn("Attempt to iounmap early bolted mapping at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}
EXPORT_SYMBOL(iounmap);