// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include <mm/mmu_decl.h>

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the page that
 * represents the start of the subsection it is within. Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static struct page * __meminit vmemmap_subsection_start(unsigned long vmemmap_addr)
{
	unsigned long start_pfn;
	unsigned long offset = vmemmap_addr - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the subsection. */
	start_pfn = (offset / sizeof(struct page)) & PAGE_SUBSECTION_MASK;
	return pfn_to_page(start_pfn);
}

/*
 * Since memory is added in sub-section chunks, before creating a new vmemmap
 * mapping, the kernel should check whether there is an existing memmap mapping
 * covering the new subsection added. This is needed because the kernel can map
 * the vmemmap area using 16MB pages, each of which covers a memory range of
 * 16GB. Such a range covers multiple subsections (2MB each).
 *
 * If any subsection in the 16GB range mapped by vmemmap is valid, we consider
 * the vmemmap populated (there is a page table entry already present). We
 * can't do a page table lookup here because with hash translation we don't
 * keep vmemmap details in the linux page table.
 */
static int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size)
{
	struct page *start;
	unsigned long vmemmap_end = vmemmap_addr + vmemmap_map_size;

	start = vmemmap_subsection_start(vmemmap_addr);

	for (; (unsigned long)start < vmemmap_end; start += PAGES_PER_SUBSECTION)
		/*
		 * pfn valid check here is intended to really check
		 * whether we have any subsection already initialized
		 * in this range.
		 */
		if (pfn_valid(page_to_pfn(start)))
			return 1;

	return 0;
}

/*
 * vmemmap virtual address space management does not have a traditional page
 * table to track which virtual struct pages are backed by a physical mapping.
 * The virtual to physical mappings are tracked in a simple linked list
 * format. 'vmemmap_list' maintains the entire vmemmap physical mapping at
 * all times, whereas the 'next' list maintains the available
 * vmemmap_backing structures which have been deleted from the
 * 'vmemmap_list' list during system runtime (memory hotplug remove
 * operation). The freed 'vmemmap_backing' structures are reused later when
 * new requests come in without allocating fresh memory. This pointer also
 * tracks the allocated 'vmemmap_backing' structures as we allocate one
 * full page of memory at a time when we don't have any.
 */
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;

/*
 * The same pointer 'next' tracks individual chunks inside the allocated
 * full page during boot time and again tracks the freed nodes during
 * runtime. This is racy in principle, but in practice the two uses are
 * separated by the boot process. It would be a problem if we somehow had
 * a memory hotplug operation during boot.
 */
static int num_left;
static int num_freed;

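/*
 * Hand out one vmemmap_backing structure, preferring an entry previously
 * released by vmemmap_list_free() and otherwise carving the next chunk out
 * of a page-sized pool that is (re)allocated on demand.
 */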
static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;
	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}

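/*
 * Record a new virtual -> physical vmemmap backing in 'vmemmap_list' so
 * that the backing page can be looked up (and freed) again at memory
 * hot-unplug time.
 */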
static __meminit int vmemmap_list_populate(unsigned long phys,
					   unsigned long start,
					   int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		pr_debug("vmemmap list allocation failed\n");
		return -ENOMEM;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
	return 0;
}

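/*
 * Check whether the pfn range described by the vmemmap chunk
 * [start, start + page_size) extends outside the pfn range covered by the
 * altmap. If it does, the chunk must be backed by regular system memory
 * rather than the altmap.
 */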
static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
				  unsigned long page_size)
{
	unsigned long nr_pfn = page_size / sizeof(struct page);
	unsigned long start_pfn = page_to_pfn((struct page *)start);

	if ((start_pfn + nr_pfn) > altmap->end_pfn)
		return true;

	if (start_pfn < altmap->base_pfn)
		return true;

	return false;
}

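/*
 * Back the vmemmap range [start, end) with large pages, recording each
 * backing page in 'vmemmap_list' and skipping chunks that are already
 * covered by an existing mapping.
 */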
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	bool altmap_alloc;
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p = NULL;
		int rc;

		/*
		 * This vmemmap range is backing different subsections. If any
		 * of those subsections is marked valid, that means we already
		 * have initialized a page table covering this range and hence
		 * the vmemmap range is populated.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		/*
		 * Allocate from the altmap first if we have one. This may
		 * fail due to alignment issues when using 16MB hugepages, so
		 * fall back to system memory if the altmap allocation fails.
		 */
		if (altmap && !altmap_cross_boundary(altmap, start, page_size)) {
			p = vmemmap_alloc_block_buf(page_size, node, altmap);
			if (!p)
				pr_debug("altmap block allocation failed, falling back to system memory");
			else
				altmap_alloc = true;
		}
		if (!p) {
			p = vmemmap_alloc_block_buf(page_size, node, NULL);
			altmap_alloc = false;
		}
		if (!p)
			return -ENOMEM;

		if (vmemmap_list_populate(__pa(p), start, node)) {
			/*
			 * If we don't populate the vmemmap list, we don't have
			 * the ability to free the allocated vmemmap
			 * pages in section_deactivate. Hence free them
			 * here.
			 */
			int nr_pfns = page_size >> PAGE_SHIFT;
			unsigned long page_order = get_order(page_size);

			if (altmap_alloc)
				vmem_altmap_free(altmap, nr_pfns);
			else
				free_pages((unsigned long)p, page_order);
			return -ENOMEM;
		}

		pr_debug(" * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warn("%s: Unable to create vmemmap mapping: %d\n",
				__func__, rc);
			return -EFAULT;
		}
	}

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
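/*
 * Unlink the vmemmap_backing entry for 'start' from 'vmemmap_list', put it
 * on the free list headed by 'next', and return the physical address of
 * the backing page (or 0 if no entry was found).
 */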
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it with prev pointer recorded */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back))
		return 0;

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list) /* remove head */
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* make next point to this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}

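/*
 * Tear down the vmemmap backing for [start, end), returning each backing
 * page to the altmap, the page allocator, or (for boot-time reserved
 * allocations) the reserved-page pool, as appropriate.
 */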
void __ref vmemmap_free(unsigned long start, unsigned long end,
			struct vmem_altmap *altmap)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long page_order = get_order(page_size);
	unsigned long alt_start = ~0, alt_end = ~0;
	unsigned long base_pfn;

	start = ALIGN_DOWN(start, page_size);
	if (altmap) {
		alt_start = altmap->base_pfn;
		alt_end = altmap->base_pfn + altmap->reserve +
			  altmap->free + altmap->alloc + altmap->align;
	}

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long nr_pages, addr;
		struct page *page;

		/*
		 * We have already marked the subsection we are trying to remove
		 * invalid. So if we want to remove the vmemmap range, we
		 * need to make sure there is no subsection marked valid
		 * in this range.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (!addr)
			continue;

		page = pfn_to_page(addr >> PAGE_SHIFT);
		nr_pages = 1 << page_order;
		base_pfn = PHYS_PFN(addr);

		if (base_pfn >= alt_start && base_pfn < alt_end) {
			vmem_altmap_free(altmap, nr_pages);
		} else if (PageReserved(page)) {
			/* allocated from bootmem */
			if (page_size < PAGE_SIZE) {
				/*
				 * this shouldn't happen, but if it is
				 * the case, leave the memory there
				 */
				WARN_ON_ONCE(1);
			} else {
				while (nr_pages--)
					free_reserved_page(page++);
			}
		} else {
			free_pages((unsigned long)(__va(addr)), page_order);
		}

		vmemmap_remove_mapping(start, page_size);
	}
}
#endif
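/*
 * Nothing needs to be recorded here; this empty stub exists so the generic
 * sparse vmemmap / memory hotplug code can call the hook unconditionally.
 */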
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}

#endif /* CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_PPC_BOOK3S_64
static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);

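/*
 * "disable_radix" on the kernel command line overrides
 * CONFIG_PPC_RADIX_MMU_DEFAULT: a bare "disable_radix" (or a true value)
 * forces the hash MMU, while "disable_radix=0" allows radix.
 */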
static int __init parse_disable_radix(char *p)
{
	bool val;

	if (!p)
		val = true;
	else if (kstrtobool(p, &val))
		return -EINVAL;

	disable_radix = val;

	return 0;
}
early_param("disable_radix", parse_disable_radix);

/*
 * If we're running under a hypervisor, we need to check the contents of
 * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do
 * radix. If not, we clear the radix feature bit so we fall back to hash.
 */
static void __init early_check_vec5(void)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;
	u8 mmu_supported;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}

	/* Check for supported configuration */
	mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
			OV5_FEAT(OV5_MMU_SUPPORT);
	if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
		/* Hypervisor only supports radix - check enabled && GTSE */
		if (!early_radix_enabled()) {
			pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
		}
		if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
						OV5_FEAT(OV5_RADIX_GTSE))) {
			cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE;
		} else
			cur_cpu_spec->mmu_features |= MMU_FTR_GTSE;
		/* Do radix anyway - the hypervisor said we had to */
		cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	} else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
		/* Hypervisor only supports hash - disable radix */
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE;
	}
}

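/*
 * Decide between hash and radix translation early in boot, based on the
 * kernel command line and, when running as a guest, on the hypervisor's
 * ibm,architecture-vec-5 support.
 */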
void __init mmu_early_init_devtree(void)
{
	/* Disable radix mode based on kernel command line. */
	if (disable_radix)
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

	/*
	 * Check /chosen/ibm,architecture-vec-5 if running as a guest.
	 * When running bare-metal, we can use radix if we like
	 * even though the ibm,architecture-vec-5 property created by
	 * skiboot doesn't have the necessary bits set.
	 */
	if (!(mfmsr() & MSR_HV))
		early_check_vec5();

	if (early_radix_enabled()) {
		radix__early_init_devtree();
		/*
		 * We have finalized the translation we are going to use by now.
		 * Radix mode is not limited by RMA / VRMA addressing.
		 * Hence don't limit memblock allocations.
		 */
		ppc64_rma_size = ULONG_MAX;
		memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
	} else
		hash__early_init_devtree();
}
#endif /* CONFIG_PPC_BOOK3S_64 */