xref: /OK3568_Linux_fs/kernel/arch/xtensa/mm/highmem.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * High memory support for Xtensa architecture
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2014 Cadence Design Systems Inc.
 */

#include <linux/export.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>

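/* First PTE of the fixmap kmap area; cached by kmap_init(). */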
static pte_t *kmap_pte;

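/*
 * With an aliasing D-cache (way size larger than the page size), PKMAP
 * bookkeeping is kept per cache color, so that each kmap virtual address
 * can match the color of the physical page it maps.
 */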
#if DCACHE_WAY_SIZE > PAGE_SIZE
unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS];
wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS];

static void __init kmap_waitqueues_init(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pkmap_map_wait_arr); ++i)
		init_waitqueue_head(pkmap_map_wait_arr + i);
}
#else
static inline void kmap_waitqueues_init(void)
{
}
#endif

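/*
 * Compute the fixmap slot for an atomic kmap: slots are grouped per CPU
 * and per kmap type, with DCACHE_N_COLORS consecutive slots per type so
 * that one of them always has the requested cache color.
 */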
static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
{
	return (type + KM_TYPE_NR * smp_processor_id()) * DCACHE_N_COLORS +
		color;
}

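/*
 * Map a highmem page into the fixmap slot whose cache color matches the
 * page's physical address, keeping the kernel alias coherent with other
 * mappings of the page. Normally reached through the generic
 * kmap_atomic()/kunmap_atomic() helpers rather than called directly,
 * e.g. (a sketch):
 *
 *	void *vaddr = kmap_atomic(page);
 *	memcpy(vaddr, src, PAGE_SIZE);
 *	kunmap_atomic(vaddr);
 */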
void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	idx = kmap_idx(kmap_atomic_idx_push(),
		       DCACHE_ALIAS(page_to_phys(page)));
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte + idx)));
#endif
	set_pte(kmap_pte + idx, mk_pte(page, prot));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_high_prot);

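/*
 * Tear down an atomic kmap: clear the fixmap PTE and flush the local TLB
 * entry. Addresses outside the fixmap range (lowmem pages mapped 1:1)
 * need no teardown and are ignored.
 */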
void kunmap_atomic_high(void *kvaddr)
{
	if (kvaddr >= (void *)FIXADDR_START &&
	    kvaddr < (void *)FIXADDR_TOP) {
		int idx = kmap_idx(kmap_atomic_idx(),
				   DCACHE_ALIAS((unsigned long)kvaddr));

		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		pte_clear(&init_mm, kvaddr, kmap_pte + idx);
		local_flush_tlb_kernel_range((unsigned long)kvaddr,
					     (unsigned long)kvaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}
}
EXPORT_SYMBOL(kunmap_atomic_high);

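/*
 * One-time setup: sanity-check the virtual memory layout, cache the first
 * kmap PTE, and initialize the per-color PKMAP wait queues.
 */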
void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* Check if this memory layout is broken because PKMAP overlaps
	 * the page table.
	 */
	BUILD_BUG_ON(PKMAP_BASE < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = virt_to_kpte(kmap_vstart);
	kmap_waitqueues_init();
}