/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
 * Adapted for the alpha wildfire architecture Jan 2001.
 */
#ifndef _ASM_MMZONE_H_
#define _ASM_MMZONE_H_

#include <asm/smp.h>

/*
 * The following macros are specific to this NUMA platform.
 */

extern pg_data_t node_data[];

#define alpha_pa_to_nid(pa)		\
	(alpha_mv.pa_to_nid		\
	 ? alpha_mv.pa_to_nid(pa)	\
	 : (0))
#define node_mem_start(nid)		\
	(alpha_mv.node_mem_start	\
	 ? alpha_mv.node_mem_start(nid)	\
	 : (0UL))
#define node_mem_size(nid)		\
	(alpha_mv.node_mem_size		\
	 ? alpha_mv.node_mem_size(nid)	\
	 : ((nid) ? (0UL) : (~0UL)))

#define pa_to_nid(pa)		alpha_pa_to_nid(pa)
#define NODE_DATA(nid)		(&node_data[(nid)])

#define node_localnr(pfn, nid)	((pfn) - NODE_DATA(nid)->node_start_pfn)

#if 1
#define PLAT_NODE_DATA_LOCALNR(p, n)	\
	(((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn)
#else
static inline unsigned long
PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
{
	unsigned long temp;
	temp = p >> PAGE_SHIFT;
	return temp - PLAT_NODE_DATA(n)->gendata.node_start_pfn;
}
#endif

#ifdef CONFIG_DISCONTIGMEM

/*
 * The following macros must be defined by each NUMA implementation.
 */

/*
 * Given a kernel address, find the home node of the underlying memory.
 */
#define kvaddr_to_nid(kaddr)	pa_to_nid(__pa(kaddr))

/*
 * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
 * and returns the kaddr corresponding to the first physical page in
 * the node's mem_map.
 */
#define LOCAL_BASE_ADDR(kaddr)						      \
	((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn \
			     << PAGE_SHIFT))

/* XXX: FIXME -- nyc */
#define kern_addr_valid(kaddr)	(0)

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)

#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> 32))
#define pte_pfn(pte)		(pte_val(pte) >> 32)

#define mk_pte(page, pgprot)						\
({									\
	pte_t pte;							\
	unsigned long pfn;						\
									\
	pfn = page_to_pfn(page) << 32;					\
	pte_val(pte) = pfn | pgprot_val(pgprot);			\
									\
	pte;								\
})

#define pte_page(x)							\
({									\
	unsigned long kvirt;						\
	struct page *__xx;						\
									\
	kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT));	\
	__xx = virt_to_page(kvirt);					\
									\
	__xx;								\
})

#define page_to_pa(page)						\
	(page_to_pfn(page) << PAGE_SHIFT)

#define pfn_to_nid(pfn)		pa_to_nid(((u64)(pfn) << PAGE_SHIFT))
#define pfn_valid(pfn)							\
	(((pfn) - node_start_pfn(pfn_to_nid(pfn))) <			\
	 node_spanned_pages(pfn_to_nid(pfn)))

#define virt_addr_valid(kaddr)	pfn_valid((__pa(kaddr) >> PAGE_SHIFT))

#endif /* CONFIG_DISCONTIGMEM */

#endif /* _ASM_MMZONE_H_ */
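/*
 * A minimal usage sketch, assuming the DISCONTIGMEM macros above: it
 * translates a kernel virtual address into its home node and the
 * page-frame offset local to that node's mem_map.  The helper name
 * example_node_local_pfn is hypothetical, shown for illustration only,
 * and compiled out via #if 0.
 */
#if 0
static inline unsigned long
example_node_local_pfn(unsigned long kaddr, int *nid)
{
	unsigned long pfn = __pa(kaddr) >> PAGE_SHIFT;

	*nid = kvaddr_to_nid(kaddr);	/* home node of the backing memory */
	return node_localnr(pfn, *nid);	/* pfn offset within that node */
}
#endif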