/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#include <asm/kmap_types.h>

#define PKMAP_BASE		(PAGE_OFFSET - PMD_SIZE)
#define LAST_PKMAP		PTRS_PER_PTE
#define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
#define PKMAP_NR(virt)		(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))

#define flush_cache_kmaps() \
	do { \
		if (cache_is_vivt()) \
			flush_cache_all(); \
	} while (0)

extern pte_t *pkmap_page_table;

/*
 * The reason for kmap_high_get() is to ensure that the currently kmap'd
 * page usage count does not decrease to zero while we're using its
 * existing virtual mapping in an atomic context.  With a VIVT cache this
 * is essential to do, but with a VIPT cache this is only an optimization
 * so not to pay the price of establishing a second mapping if an existing
 * one can be used.  However, on platforms without hardware TLB maintenance
 * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
 * the locking involved must also disable IRQs which is incompatible with
 * the IPI mechanism used by global TLB operations.
 */
#define ARCH_NEEDS_KMAP_HIGH_GET
#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
#undef ARCH_NEEDS_KMAP_HIGH_GET
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
#error "The sum of features in your kernel config cannot be supported together"
#endif
#endif

/*
 * Needed to be able to broadcast the TLB invalidation for kmap.
 */
#ifdef CONFIG_ARM_ERRATA_798181
#undef ARCH_NEEDS_KMAP_HIGH_GET
#endif

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
extern void *kmap_high_get(struct page *page);
#else
static inline void *kmap_high_get(struct page *page)
{
	return NULL;
}
#endif

/*
 * The following functions are already defined by <linux/highmem.h>
 * when CONFIG_HIGHMEM is not set.
 */
#ifdef CONFIG_HIGHMEM
extern void *kmap_atomic_pfn(unsigned long pfn);
#endif

#endif
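
/*
 * Usage sketch (illustrative only, not part of this header): a caller
 * that needs a kernel virtual address for a possibly-highmem page first
 * tries kmap_high_get() to reuse and pin an existing kmap mapping, and
 * falls back to a temporary atomic mapping when the page is not
 * currently kmap'd.  A successful kmap_high_get() must be balanced by
 * kunmap_high(); the names `page`, `buf`, and the memcpy() body below
 * are hypothetical stand-ins for the caller's own work:
 *
 *	void *addr = kmap_high_get(page);
 *	if (addr) {
 *		memcpy(buf, addr, PAGE_SIZE);	// reuse existing mapping
 *		kunmap_high(page);		// drop the extra pin
 *	} else {
 *		addr = kmap_atomic(page);	// temporary per-CPU mapping
 *		memcpy(buf, addr, PAGE_SIZE);
 *		kunmap_atomic(addr);
 *	}
 *
 * When ARCH_NEEDS_KMAP_HIGH_GET is undefined, the inline stub above
 * always returns NULL, so such callers transparently take the
 * kmap_atomic() path.
 */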