// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright © 2008 Ingo Molnar
 */

#include <asm/iomap.h>
#include <asm/memtype.h>
#include <linux/export.h>
#include <linux/highmem.h>

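/*
 * is_io_mapping_possible - check whether a physical range can be mapped
 * @base: start of the physical address range
 * @size: length of the range in bytes
 *
 * Returns 1 if [base, base + size) is mappable with the current kernel
 * configuration, 0 otherwise.
 */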
static int is_io_mapping_possible(resource_size_t base, unsigned long size)
{
#if !defined(CONFIG_X86_PAE) && defined(CONFIG_PHYS_ADDR_T_64BIT)
	/* There is no way to map addresses above 1 << 32 without PAE */
	if (base + size > 0x100000000ULL)
		return 0;
#endif
	return 1;
}

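/**
 * iomap_create_wc - reserve an I/O range for write-combining mappings
 * @base: start of the physical address range
 * @size: length of the range in bytes
 * @prot: returns the page protection to use for subsequent mappings
 *
 * Reserves the memory type for [base, base + size) as write-combining (or
 * whatever memtype_reserve_io() downgrades it to) and fills in @prot
 * accordingly.  Returns 0 on success or a negative error code.
 */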
int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC;
	int ret;

	if (!is_io_mapping_possible(base, size))
		return -EINVAL;

	ret = memtype_reserve_io(base, base + size, &pcm);
	if (ret)
		return ret;

	*prot = __pgprot(__PAGE_KERNEL | cachemode2protval(pcm));
	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(*prot) &= __default_kernel_pte_mask;

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_create_wc);

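/**
 * iomap_free - release a range reserved with iomap_create_wc()
 * @base: start of the physical address range
 * @size: length of the range in bytes
 */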
void iomap_free(resource_size_t base, unsigned long size)
{
	memtype_free_io(base, base + size);
}
EXPORT_SYMBOL_GPL(iomap_free);

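/*
 * Map 'pfn' into a per-CPU kmap fixmap slot with protection 'prot' and
 * return the slot's virtual address.  Preemption and pagefaults are
 * disabled here; the matching atomic unmap re-enables them.
 */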
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

/*
 * Map 'pfn' using protections 'prot'
 */
void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	/*
	 * For non-PAT systems, translate non-WB requests to UC- just in
	 * case the caller set the PWT bit in prot directly without using
	 * pgprot_writecombine(). UC- translates to uncached if the MTRR
	 * is UC or WC. UC- thus captures the user's real intention, which
	 * is "WC if the MTRR is WC, UC if you can't do that."
	 */
	if (!pat_enabled() && pgprot2cachemode(prot) != _PAGE_CACHE_MODE_WB)
		prot = __pgprot(__PAGE_KERNEL |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));

	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(prot) &= __default_kernel_pte_mask;

	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);

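/**
 * iounmap_atomic - undo a mapping set up by iomap_atomic_prot_pfn()
 * @kvaddr: virtual address returned by iomap_atomic_prot_pfn()
 *
 * Clears and flushes the fixmap PTE if @kvaddr lies inside the kmap
 * fixmap area, then re-enables pagefaults and preemption.
 */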
void
iounmap_atomic(void __iomem *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is a bad idea too, in case the page changes
		 * cacheability attributes or becomes a protected page in
		 * a hypervisor.
		 */
		kpte_clear_flush(kmap_pte - idx, vaddr);
		kmap_atomic_idx_pop();
	}

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(iounmap_atomic);

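/*
 * These exports are normally reached through the io_mapping helpers in
 * <linux/io-mapping.h>.  Direct use follows the pattern sketched below.
 * This is an illustrative sketch only: "bar_base", "bar_size" and
 * "do_mmio_write()" are hypothetical, error handling is trimmed, and no
 * sleeping is allowed between the atomic map and unmap:
 *
 *	pgprot_t prot;
 *	void __iomem *vaddr;
 *	int ret;
 *
 *	ret = iomap_create_wc(bar_base, bar_size, &prot);
 *	if (ret)
 *		return ret;
 *
 *	vaddr = iomap_atomic_prot_pfn(PHYS_PFN(bar_base), prot);
 *	do_mmio_write(vaddr);
 *	iounmap_atomic(vaddr);
 *
 *	iomap_free(bar_base, bar_size);
 */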