/*
 * Kernel virtual memory layout definitions.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2016 Cadence Design Systems Inc.
 */

#ifndef _XTENSA_KMEM_LAYOUT_H
#define _XTENSA_KMEM_LAYOUT_H

#include <asm/core.h>
#include <asm/types.h>

#ifdef CONFIG_MMU

/*
 * Fixed TLB translations in the processor.
 */

#define XCHAL_PAGE_TABLE_VADDR		__XTENSA_UL_CONST(0x80000000)
#define XCHAL_PAGE_TABLE_SIZE		__XTENSA_UL_CONST(0x00400000)

#if defined(CONFIG_XTENSA_KSEG_MMU_V2)

#define XCHAL_KSEG_CACHED_VADDR		__XTENSA_UL_CONST(0xd0000000)
#define XCHAL_KSEG_BYPASS_VADDR		__XTENSA_UL_CONST(0xd8000000)
#define XCHAL_KSEG_SIZE			__XTENSA_UL_CONST(0x08000000)
#define XCHAL_KSEG_ALIGNMENT		__XTENSA_UL_CONST(0x08000000)
#define XCHAL_KSEG_TLB_WAY		5
#define XCHAL_KIO_TLB_WAY		6

#elif defined(CONFIG_XTENSA_KSEG_256M)

#define XCHAL_KSEG_CACHED_VADDR		__XTENSA_UL_CONST(0xb0000000)
#define XCHAL_KSEG_BYPASS_VADDR		__XTENSA_UL_CONST(0xc0000000)
#define XCHAL_KSEG_SIZE			__XTENSA_UL_CONST(0x10000000)
#define XCHAL_KSEG_ALIGNMENT		__XTENSA_UL_CONST(0x10000000)
#define XCHAL_KSEG_TLB_WAY		6
#define XCHAL_KIO_TLB_WAY		6

#elif defined(CONFIG_XTENSA_KSEG_512M)

#define XCHAL_KSEG_CACHED_VADDR		__XTENSA_UL_CONST(0xa0000000)
#define XCHAL_KSEG_BYPASS_VADDR		__XTENSA_UL_CONST(0xc0000000)
#define XCHAL_KSEG_SIZE			__XTENSA_UL_CONST(0x20000000)
#define XCHAL_KSEG_ALIGNMENT		__XTENSA_UL_CONST(0x10000000)
#define XCHAL_KSEG_TLB_WAY		6
#define XCHAL_KIO_TLB_WAY		6

#else
#error Unsupported KSEG configuration
#endif

#ifdef CONFIG_KSEG_PADDR
#define XCHAL_KSEG_PADDR		__XTENSA_UL_CONST(CONFIG_KSEG_PADDR)
#else
#define XCHAL_KSEG_PADDR		__XTENSA_UL_CONST(0x00000000)
#endif

#if XCHAL_KSEG_PADDR & (XCHAL_KSEG_ALIGNMENT - 1)
#error XCHAL_KSEG_PADDR is not properly aligned to XCHAL_KSEG_ALIGNMENT
#endif

#endif
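/*
 * The KSEG window defined above maps XCHAL_KSEG_SIZE bytes of physical
 * memory, starting at XCHAL_KSEG_PADDR, into kernel virtual space twice:
 * once through the cache (XCHAL_KSEG_CACHED_VADDR) and once bypassing it
 * (XCHAL_KSEG_BYPASS_VADDR).  On cores with a spanning-way MMU (v3) this
 * mapping is typically installed in the TLB way named by
 * XCHAL_KSEG_TLB_WAY, which is why XCHAL_KSEG_PADDR has to be aligned to
 * XCHAL_KSEG_ALIGNMENT; the MMU_V2 variant simply describes the fixed
 * hardware layout of that MMU.
 */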
/* KIO definition */

#if XCHAL_HAVE_PTP_MMU
#define XCHAL_KIO_CACHED_VADDR		0xe0000000
#define XCHAL_KIO_BYPASS_VADDR		0xf0000000
#define XCHAL_KIO_DEFAULT_PADDR		0xf0000000
#else
#define XCHAL_KIO_BYPASS_VADDR		XCHAL_KIO_PADDR
#define XCHAL_KIO_DEFAULT_PADDR		0x90000000
#endif
#define XCHAL_KIO_SIZE			0x10000000

#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_USE_OF)
#define XCHAL_KIO_PADDR			xtensa_get_kio_paddr()
#ifndef __ASSEMBLY__
extern unsigned long xtensa_kio_paddr;

static inline unsigned long xtensa_get_kio_paddr(void)
{
	return xtensa_kio_paddr;
}
#endif
#else
#define XCHAL_KIO_PADDR			XCHAL_KIO_DEFAULT_PADDR
#endif

/* KERNEL_STACK definition */

#ifndef CONFIG_KASAN
#define KERNEL_STACK_SHIFT	13
#else
#define KERNEL_STACK_SHIFT	15
#endif
#define KERNEL_STACK_SIZE	(1 << KERNEL_STACK_SHIFT)

#endif
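/*
 * For reference: the generic virt/phys helpers are built on the constants
 * above.  asm/page.h normally takes PAGE_OFFSET from XCHAL_KSEG_CACHED_VADDR
 * and PHYS_OFFSET from XCHAL_KSEG_PADDR, so for addresses inside the KSEG
 * window the conversion is plain offset arithmetic, roughly:
 *
 *	phys = virt - XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_PADDR
 *	virt = phys - XCHAL_KSEG_PADDR + XCHAL_KSEG_CACHED_VADDR
 */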