xref: /OK3568_Linux_fs/kernel/arch/sparc/include/asm/highmem.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * highmem.h: virtual kernel memory mappings for high memory
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Used in CONFIG_HIGHMEM systems for memory pages which
6*4882a593Smuzhiyun  * are not addressable by direct kernel virtual addresses.
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * Copyright (C) 1999 Gerhard Wichert, Siemens AG
9*4882a593Smuzhiyun  *		      Gerhard.Wichert@pdb.siemens.de
10*4882a593Smuzhiyun  *
11*4882a593Smuzhiyun  *
12*4882a593Smuzhiyun  * Redesigned the x86 32-bit VM architecture to deal with
13*4882a593Smuzhiyun  * up to 16 Terabyte physical memory. With current x86 CPUs
14*4882a593Smuzhiyun  * we now support up to 64 Gigabytes physical RAM.
15*4882a593Smuzhiyun  *
16*4882a593Smuzhiyun  * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
17*4882a593Smuzhiyun  */
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun #ifndef _ASM_HIGHMEM_H
20*4882a593Smuzhiyun #define _ASM_HIGHMEM_H
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun #ifdef __KERNEL__
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun #include <linux/interrupt.h>
25*4882a593Smuzhiyun #include <linux/pgtable.h>
26*4882a593Smuzhiyun #include <asm/vaddrs.h>
27*4882a593Smuzhiyun #include <asm/kmap_types.h>
28*4882a593Smuzhiyun #include <asm/pgtsrmmu.h>
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun /* declarations for highmem.c */
31*4882a593Smuzhiyun extern unsigned long highstart_pfn, highend_pfn; /* PFN bounds of the highmem range (defined in highmem.c) */
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun #define kmap_prot __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE) /* kmap PTE flags: valid, privileged, cacheable */
34*4882a593Smuzhiyun extern pte_t *pkmap_page_table; /* pte table backing the pkmap virtual area */
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun void kmap_init(void) __init; /* one-time boot setup of kmap; __init code is discarded after boot */
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun /*
39*4882a593Smuzhiyun  * Right now we initialize only a single pte table. It can be extended
40*4882a593Smuzhiyun  * easily, subsequent pte tables have to be allocated in one physical
41*4882a593Smuzhiyun  * chunk of RAM.  Currently the simplest way to do this is to align the
42*4882a593Smuzhiyun  * pkmap region on a pagetable boundary (4MB).
43*4882a593Smuzhiyun  */
44*4882a593Smuzhiyun #define LAST_PKMAP 1024 /* one pte page of slots; 1024 * PAGE_SIZE = 4MB, matching the pagetable alignment above */
45*4882a593Smuzhiyun #define PKMAP_SIZE (LAST_PKMAP << PAGE_SHIFT)
46*4882a593Smuzhiyun #define PKMAP_BASE PMD_ALIGN(SRMMU_NOCACHE_VADDR + (SRMMU_MAX_NOCACHE_PAGES << PAGE_SHIFT))
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun #define LAST_PKMAP_MASK (LAST_PKMAP - 1)
49*4882a593Smuzhiyun #define PKMAP_NR(virt)  (((virt) - PKMAP_BASE) >> PAGE_SHIFT) /* parenthesize (virt) so expression args group correctly */
50*4882a593Smuzhiyun #define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun #define PKMAP_END (PKMAP_ADDR(LAST_PKMAP)) /* first address past the pkmap area */
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun #define flush_cache_kmaps()	flush_cache_all() /* no per-range flush here: the whole cache is flushed */
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun #endif /* __KERNEL__ */
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun #endif /* _ASM_HIGHMEM_H */
59