// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2002 - 2011 Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 *  Copyright (C) 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <linux/sizes.h>
#include "ioremap.h"

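/*
 * The kernel's master page directory. paging_init() points MMU.TTB at
 * this so the TLB miss handlers always have a valid page table to walk.
 */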
pgd_t swapper_pg_dir[PTRS_PER_PGD];

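/*
 * Default memory setup, used when the machine vector does not provide
 * its own mv_mem_init(): simply hand the board's memory window
 * (__MEMORY_START/__MEMORY_SIZE) to memblock.
 */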
void __init generic_mem_init(void)
{
        memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
        /* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
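/*
 * Look up the kernel PTE for @addr, allocating any missing mid-level
 * tables on the way down (with sh's folded upper levels the *_alloc()
 * calls normally reduce to pointer arithmetic). Returns NULL if the
 * walk cannot be completed.
 */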
static pte_t *__get_pte_phys(unsigned long addr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd)) {
                pgd_ERROR(*pgd);
                return NULL;
        }

        p4d = p4d_alloc(NULL, pgd, addr);
        if (unlikely(!p4d)) {
                pr_err("%s: failed to allocate p4d for 0x%08lx\n",
                       __func__, addr);
                return NULL;
        }

        pud = pud_alloc(NULL, p4d, addr);
        if (unlikely(!pud)) {
                pr_err("%s: failed to allocate pud for 0x%08lx\n",
                       __func__, addr);
                return NULL;
        }

        pmd = pmd_alloc(NULL, pud, addr);
        if (unlikely(!pmd)) {
                pr_err("%s: failed to allocate pmd for 0x%08lx\n",
                       __func__, addr);
                return NULL;
        }

        return pte_offset_kernel(pmd, addr);
}

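/*
 * Install a kernel mapping of @phys at @addr with protection @prot and
 * flush the stale TLB entry. Mappings created with _PAGE_WIRED in
 * @prot are additionally pinned into a wired TLB slot.
 */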
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
        pte_t *pte;

        pte = __get_pte_phys(addr);
        if (!pte)
                return;
        if (!pte_none(*pte)) {
                pte_ERROR(*pte);
                return;
        }

        set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
        local_flush_tlb_one(get_asid(), addr);

        if (pgprot_val(prot) & _PAGE_WIRED)
                tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
        pte_t *pte;

        pte = __get_pte_phys(addr);
        if (!pte)
                return;

        if (pgprot_val(prot) & _PAGE_WIRED)
                tlb_unwire_entry();

        set_pte(pte, pfn_pte(0, __pgprot(0)));
        local_flush_tlb_one(get_asid(), addr);
}

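/*
 * Fixmap helpers: convert a compile-time fixmap index to its fixed
 * virtual address and install or tear down the mapping there. An
 * out-of-range index is a compile-time logic error, hence BUG().
 */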
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }

        set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }

        clear_pte_phys(address, prot);
}

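/*
 * Boot-time page table constructors: populate a missing PMD/PTE level
 * from memblock, panicking on failure since this runs before the page
 * allocator is up and there is no way to recover.
 */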
static pmd_t * __init one_md_table_init(pud_t *pud)
{
        if (pud_none(*pud)) {
                pmd_t *pmd;

                pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
                if (!pmd)
                        panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                              __func__, PAGE_SIZE, PAGE_SIZE);
                pud_populate(&init_mm, pud, pmd);
                BUG_ON(pmd != pmd_offset(pud, 0));
        }

        return pmd_offset(pud, 0);
}

static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (pmd_none(*pmd)) {
                pte_t *pte;

                pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
                if (!pte)
                        panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                              __func__, PAGE_SIZE, PAGE_SIZE);
                pmd_populate_kernel(&init_mm, pmd, pte);
                BUG_ON(pte != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}

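/*
 * No-op on sh: this hook presumably exists so page_table_range_init()
 * below keeps the same shape as the i386 code this file derives from,
 * where kmap PTE placement actually needs checking.
 */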
static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
                                            unsigned long vaddr, pte_t *lastpte)
{
        return pte;
}

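/*
 * Pre-allocate page tables so that every virtual address in
 * [start, end) has a PMD and a PTE page backing it. Only the tables
 * are created here; the PTE entries themselves are filled in later
 * (e.g. by __set_fixmap()).
 */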
void __init page_table_range_init(unsigned long start, unsigned long end,
                                  pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;
        int i, j, k;
        unsigned long vaddr;

        vaddr = start;
        i = pgd_index(vaddr);
        j = pud_index(vaddr);
        k = pmd_index(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
                pud = (pud_t *)pgd;
                for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
                        pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
                        pmd += k;
#endif
                        for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
                                pte = page_table_kmap_check(one_page_table_init(pmd),
                                                            pmd, vaddr, pte);
                                vaddr += PMD_SIZE;
                        }
                        k = 0;
                }
                j = 0;
        }
}
#endif /* CONFIG_MMU */

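/*
 * Set up the pglist_data for @nid. With CONFIG_NEED_MULTIPLE_NODES the
 * node structure itself also has to be carved out of memblock, ideally
 * on the node it describes.
 */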
void __init allocate_pgdat(unsigned int nid)
{
        unsigned long start_pfn, end_pfn;

        get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
        NODE_DATA(nid) = memblock_alloc_try_nid(
                                sizeof(struct pglist_data),
                                SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
                                MEMBLOCK_ALLOC_ACCESSIBLE, nid);
        if (!NODE_DATA(nid))
                panic("Can't allocate pgdat for node %d\n", nid);
#endif

        NODE_DATA(nid)->node_start_pfn = start_pfn;
        NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

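/*
 * Register all memblock-known RAM as active ranges, bring up node 0,
 * give the platform its setup callback, and initialize the sparse
 * memory model.
 */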
static void __init do_init_bootmem(void)
{
        unsigned long start_pfn, end_pfn;
        int i;

        /* Add active regions with valid PFNs. */
        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)
                __add_active_range(0, start_pfn, end_pfn);

        /* All of system RAM sits in node 0 for the non-NUMA case */
        allocate_pgdat(0);
        node_set_online(0);

        plat_mem_setup();

        sparse_init();
}

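/*
 * Carve the pieces of RAM that must never reach the page allocator out
 * of memblock: the kernel image, anything below the zero page, the
 * initrd and any crash kernel region.
 */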
static void __init early_reserve_mem(void)
{
        unsigned long start_pfn;
        u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
        u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

        /*
         * Partially used pages are not usable - thus
         * we are rounding upwards:
         */
        start_pfn = PFN_UP(__pa(_end));

        /*
         * Reserve the kernel text and the bootmem bitmap. We do this
         * in two steps (the first step was init_bootmem()) because it
         * catches the (definitely buggy) case of us accidentally
         * initializing the bootmem allocator with an invalid RAM area.
         */
        memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

        /*
         * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
         */
        if (CONFIG_ZERO_PAGE_OFFSET != 0)
                memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

        /*
         * Handle additional early reservations
         */
        check_for_initrd();
        reserve_crashkernel();
}

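/*
 * Boot-time MM bring-up: register and reserve memory, derive the PFN
 * limits, build swapper_pg_dir plus the fixmap page tables, and hand
 * the zone sizes to free_area_init().
 */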
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long vaddr, end;

        sh_mv.mv_mem_init();

        early_reserve_mem();

        /*
         * Once the early reservations are out of the way, give the
         * platforms a chance to kick out some memory.
         */
        if (sh_mv.mv_mem_reserve)
                sh_mv.mv_mem_reserve();

        memblock_enforce_memory_limit(memory_limit);
        memblock_allow_resize();

        memblock_dump_all();

        /*
         * Determine low and high memory ranges:
         */
        max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

        nodes_clear(node_online_map);

        memory_start = (unsigned long)__va(__MEMORY_START);
        memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

        uncached_init();
        pmb_init();
        do_init_bootmem();
        ioremap_fixed_init();

        /*
         * We don't need to map the kernel through the TLB, as it is
         * permanently mapped using P1. So clear the entire pgd.
         */
        memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

        /*
         * Set an initial value for the MMU.TTB so we don't have to
         * check for a null value.
         */
        set_TTB(swapper_pg_dir);

        /*
         * Populate the relevant portions of swapper_pg_dir so that
         * we can use the fixmap entries without calling kmalloc.
         * PTEs will be filled in by __set_fixmap().
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, swapper_pg_dir);

        kmap_coherent_init();

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        free_area_init(max_zone_pfns);
}

unsigned int mem_init_done = 0;

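/*
 * Late memory init: compute high_memory, release all free memblock
 * pages to the buddy allocator, set up the shared zero page and print
 * the virtual memory layout.
 */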
void __init mem_init(void)
{
        pg_data_t *pgdat;

        high_memory = NULL;
        for_each_online_pgdat(pgdat)
                high_memory = max_t(void *, high_memory,
                                    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

        memblock_free_all();

        /* Set this up early, so we can take care of the zero page */
        cpu_cache_init();

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);
        __flush_wback_region(empty_zero_page, PAGE_SIZE);

        vsyscall_init();

        mem_init_print_info(NULL);
        pr_info("virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
                "            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

                (unsigned long)VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,

                (unsigned long)memory_start, (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
                uncached_start, uncached_end, uncached_size >> 20,
#endif

                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,

                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

        mem_init_done = 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
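/*
 * Memory hotplug: everything lands in ZONE_NORMAL, so add/remove are
 * thin wrappers around the generic __add_pages()/__remove_pages().
 */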
int arch_add_memory(int nid, u64 start, u64 size,
                    struct mhp_params *params)
{
        unsigned long start_pfn = PFN_DOWN(start);
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
                return -EINVAL;

        /* We only have ZONE_NORMAL, so this is easy.. */
        ret = __add_pages(nid, start_pfn, nr_pages, params);
        if (unlikely(ret))
                pr_err("%s: Failed, __add_pages() == %d\n", __func__, ret);

        return ret;
}

void arch_remove_memory(int nid, u64 start, u64 size,
                        struct vmem_altmap *altmap)
{
        unsigned long start_pfn = PFN_DOWN(start);
        unsigned long nr_pages = size >> PAGE_SHIFT;

        __remove_pages(start_pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */