// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>
#include <linux/dma-direct.h>
#include <linux/kprobes.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
#include <asm/svm.h>
#include <asm/mmzone.h>
#include <asm/ftrace.h>

#include <mm/mmu_decl.h>

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;
bool init_mem_is_free;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
#endif

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
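
/*
 * Hedged usage sketch (not part of this file): /dev/mem style mappings are
 * the typical caller of the helper above. A driver's mmap path would apply
 * the returned protection before remapping, roughly:
 *
 *	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
 *						 vma->vm_end - vma->vm_start,
 *						 vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */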

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int __weak create_section_mapping(unsigned long start, unsigned long end,
				  int nid, pgprot_t prot)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

#define FLUSH_CHUNK_SIZE SZ_1G
/**
 * flush_dcache_range_chunked(): Write any modified data cache blocks out to
 * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 * @chunk: the max size of the chunks
 */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
				       unsigned long chunk)
{
	unsigned long i;

	for (i = start; i < stop; i += chunk) {
		flush_dcache_range(i, min(stop, i + chunk));
		cond_resched();
	}
}

int __ref arch_add_memory(int nid, u64 start, u64 size,
			  struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid,
				    params->pgprot);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}

	return __add_pages(nid, start_pfn, nr_pages, params);
}

void __ref arch_remove_memory(int nid, u64 start, u64 size,
			     struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	__remove_pages(start_pfn, nr_pages, altmap);

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);

	ret = remove_section_mapping(start, start + size);
	WARN_ON_ONCE(ret);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();
}
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	unsigned long spfn, epfn, prev = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) {
		if (prev && prev < spfn)
			register_nosave_region(prev, spfn);

		prev = epfn;
	}

	return 0;
}
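
/*
 * Worked example (illustrative PFNs, not from this tree): if memblock
 * reports RAM at PFNs [0, 0x100) and [0x200, 0x300), the loop above
 * registers the hole [0x100, 0x200) as a nosave region, so hibernation
 * neither saves nor restores pages that do not exist.
 */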
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

/*
 * Zones usage:
 *
 * We setup ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the
 * generic DMA mapping code.  32-bit only devices (if not handled by an IOMMU
 * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
 * ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
	unsigned long v = __fix_to_virt(FIX_KMAP_END);
	unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */

	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

	/*
	 * Allow 30-bit DMA for very limited Broadcom wifi chips on many
	 * powerbooks.
	 */
	if (IS_ENABLED(CONFIG_PPC32))
		zone_dma_bits = 30;
	else
		zone_dma_bits = 31;

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]	= min(max_low_pfn,
				      1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
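	/*
	 * Worked example (illustrative, assuming 4K pages): with
	 * zone_dma_bits = 31 the cap above is 1UL << (31 - 12) = 0x80000
	 * PFNs, so ZONE_DMA spans the first 2GB unless the machine has
	 * less low memory than that.
	 */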
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init(max_zone_pfns);

	mark_nonram_nosave();
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	/*
	 * Some platforms (e.g. 85xx) limit DMA-able memory way below
	 * 4G. We force memblock to bottom-up mode to ensure that the
	 * memory allocated in swiotlb_init() is DMA-able.
	 * As it's the last memblock allocation, no need to reset it
	 * back to top-down.
	 */
	memblock_set_bottom_up(true);
	if (is_secure_guest())
		svm_swiotlb_init();
	else
		swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);

	kasan_late_init();

	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
	if (ioremap_bot != IOREMAP_TOP)
		pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
			ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
	ftrace_free_init_tramp();
}

/**
 * flush_coherent_icache() - if a CPU has a coherent icache, flush it
 * @addr: The base address to use (can be any valid address, the whole cache will be flushed)
 *
 * Return: true if the cache was flushed, false otherwise
 */
static inline bool flush_coherent_icache(unsigned long addr)
{
	/*
	 * For a snooping icache, we still need a dummy icbi to purge all the
	 * prefetched instructions from the ifetch buffers. We also need a sync
	 * before the icbi to order the actual stores to memory that might
	 * have modified instructions with the icbi.
	 */
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
		mb(); /* sync */
		allow_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
		icbi((void *)addr);
		prevent_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
		mb(); /* sync */
		isync();
		return true;
	}

	return false;
}

/**
 * invalidate_icache_range() - Flush the icache by issuing icbi across an address range
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
static void invalidate_icache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_icache_shift();
	unsigned long bytes = l1_icache_bytes();
	char *addr = (char *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		icbi(addr);

	mb(); /* sync */
	isync();
}
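
/*
 * Worked example (illustrative, assuming 128-byte icache lines): for
 * start = 0x1050 and stop = 0x1100, addr rounds down to 0x1000 and
 * size = 0x100 + 0x7f = 0x17f, so size >> 7 = 2 and both cache lines
 * covering the range (0x1000 and 0x1080) receive an icbi.
 */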

/**
 * flush_icache_range: Write any modified data cache blocks out to memory
 * and invalidate the corresponding blocks in the instruction cache
 *
 * Generic code will call this after writing memory, before executing from it.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
void flush_icache_range(unsigned long start, unsigned long stop)
{
	if (flush_coherent_icache(start))
		return;

	clean_dcache_range(start, stop);

	if (IS_ENABLED(CONFIG_44x)) {
		/*
		 * Flash invalidate on 44x because we are passed kmapped
		 * addresses and this doesn't work for userspace pages due to
		 * the virtually tagged icache.
		 */
		iccci((void *)start);
		mb(); /* sync */
		isync();
	} else
		invalidate_icache_range(start, stop);
}
EXPORT_SYMBOL(flush_icache_range);
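
/*
 * Hedged usage sketch (not part of this file): code patching is the
 * canonical caller. After writing instructions, callers do roughly
 *
 *	memcpy(dst, src, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 *
 * so the new instructions are pushed out of the dcache and any stale
 * icache blocks are invalidated before anything executes from dst.
 */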

#if !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)
/**
 * flush_dcache_icache_phys() - Flush a page by its physical address
 * @physaddr: the physical address of the page
 */
static void flush_dcache_icache_phys(unsigned long physaddr)
{
	unsigned long bytes = l1_dcache_bytes();
	unsigned long nb = PAGE_SIZE / bytes;
	unsigned long addr = physaddr & PAGE_MASK;
	unsigned long msr, msr0;
	unsigned long loop1 = addr, loop2 = addr;

	msr0 = mfmsr();
	msr = msr0 & ~MSR_DR;
	/*
	 * This must remain as ASM to prevent potential memory accesses
	 * while the data MMU is disabled
	 */
	asm volatile(
		"   mtctr %2;\n"
		"   mtmsr %3;\n"
		"   isync;\n"
		"0: dcbst   0, %0;\n"
		"   addi    %0, %0, %4;\n"
		"   bdnz    0b;\n"
		"   sync;\n"
		"   mtctr %2;\n"
		"1: icbi    0, %1;\n"
		"   addi    %1, %1, %4;\n"
		"   bdnz    1b;\n"
		"   sync;\n"
		"   mtmsr %5;\n"
		"   isync;\n"
		: "+&r" (loop1), "+&r" (loop2)
		: "r" (nb), "r" (msr), "i" (bytes), "r" (msr0)
		: "ctr", "memory");
}
NOKPROBE_SYMBOL(flush_dcache_icache_phys)
#endif // !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
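
/*
 * Hedged sketch of the deferred-flush protocol (the callers live elsewhere,
 * e.g. the set_pte() filters in arch/powerpc/mm/pgtable.c): when the page is
 * later mapped executable into user space, code of roughly this shape runs
 *
 *	if (!test_bit(PG_arch_1, &pg->flags)) {
 *		flush_dcache_icache_page(pg);
 *		set_bit(PG_arch_1, &pg->flags);
 *	}
 *
 * so PG_arch_1 acts as an "icache clean" marker that this file clears.
 */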

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		unsigned long addr = page_to_pfn(page) << PAGE_SHIFT;

		if (flush_coherent_icache(addr))
			return;
		flush_dcache_icache_phys(addr);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

/**
 * __flush_dcache_icache(): Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 * @p: the address of the page to flush
 */
void __flush_dcache_icache(void *p)
{
	unsigned long addr = (unsigned long)p;

	if (flush_coherent_icache(addr))
		return;

	clean_dcache_range(addr, addr + PAGE_SIZE);

	/*
	 * We don't flush the icache on 44x. Those have a virtual icache and we
	 * don't have access to the virtual address here (it's not the page
	 * vaddr but where it's mapped in user space). The flushing of the
	 * icache on these is handled elsewhere, when a change in the address
	 * space occurs, before returning to user space.
	 */

	if (mmu_has_feature(MMU_FTR_TYPE_44x))
		return;

	invalidate_icache_range(addr, addr + PAGE_SIZE);
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		struct resource *res;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = start;
			/*
			 * In memblock, end points to the first byte after
			 * the range while in resources, end points to the
			 * last byte in the range.
			 */
			res->end = end - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);
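
/*
 * Illustrative result (hedged sketch; the address range is an assumption,
 * shown for a machine with 2GB of RAM at physical 0): after the initcall
 * above runs, /proc/iomem gains an entry like
 *
 *	00000000-7fffffff : System RAM
 */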

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);