// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/slab.h>

#include <asm/page.h>

#include <xen/page.h>
#include <xen/xen.h>

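/*
 * Unused unpopulated pages are kept on a singly-linked free list,
 * threaded through the zone_device_data field of struct page and
 * serialized by list_lock.
 */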
static DEFINE_MUTEX(list_lock);
static struct page *page_list;
static unsigned int list_count;

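/*
 * Grow the free list by at least nr_pages: claim an unused physical
 * address range (rounded up to a whole section) and back it with
 * ZONE_DEVICE pages via memremap_pages().
 */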
static int fill_list(unsigned int nr_pages)
{
	struct dev_pagemap *pgmap;
	struct resource *res;
	void *vaddr;
	unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
	int ret = -ENOMEM;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Xen scratch";
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	ret = allocate_resource(&iomem_resource, res,
				alloc_pages * PAGE_SIZE, 0, -1,
				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new IOMEM resource\n");
		goto err_resource;
	}

	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap) {
		ret = -ENOMEM;
		goto err_pgmap;
	}

	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range = (struct range) {
		.start = res->start,
		.end = res->end,
	};
	pgmap->nr_range = 1;
	pgmap->owner = res;

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * memremap_pages() will build page tables for the new memory,
	 * so the p2m must contain invalid entries to ensure that the
	 * correct non-present PTEs get written.
	 *
	 * If a failure occurs, the original (identity) p2m entries
	 * are not restored since this region is now known not to
	 * conflict with any devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		xen_pfn_t pfn = PFN_DOWN(res->start);

		for (i = 0; i < alloc_pages; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				ret = -ENOMEM;
				goto err_memremap;
			}
		}
	}
#endif

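	/*
	 * memremap_pages() creates the struct pages backing the new
	 * range and returns the kernel virtual address of its start.
	 */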
	vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
	if (IS_ERR(vaddr)) {
		pr_err("Cannot remap memory range\n");
		ret = PTR_ERR(vaddr);
		goto err_memremap;
	}

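	/* Thread each new page onto the head of the free list. */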
	for (i = 0; i < alloc_pages; i++) {
		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

		BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
		pg->zone_device_data = page_list;
		page_list = pg;
		list_count++;
	}

	return 0;

err_memremap:
	kfree(pgmap);
err_pgmap:
	release_resource(res);
err_resource:
	kfree(res);
	return ret;
}

/**
 * xen_alloc_unpopulated_pages - alloc unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages returned
 *
 * Return: 0 on success, error otherwise
 */
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	int ret = 0;

	mutex_lock(&list_lock);
	if (list_count < nr_pages) {
		ret = fill_list(nr_pages - list_count);
		if (ret)
			goto out;
	}

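	/* Pop nr_pages pages off the head of the free list. */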
	for (i = 0; i < nr_pages; i++) {
		struct page *pg = page_list;

		BUG_ON(!pg);
		page_list = pg->zone_device_data;
		list_count--;
		pages[i] = pg;

#ifdef CONFIG_XEN_HAVE_PVMMU
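		/*
		 * Make sure the p2m tree has allocated, writable
		 * entries for this page so its machine frame can be
		 * changed later (e.g. when mapping a foreign frame).
		 */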
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = xen_alloc_p2m_entry(page_to_pfn(pg));
			if (ret < 0) {
				unsigned int j;

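				/*
				 * Roll back: return every page taken
				 * so far, including this one, to the
				 * free list.
				 */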
				for (j = 0; j <= i; j++) {
					pages[j]->zone_device_data = page_list;
					page_list = pages[j];
					list_count++;
				}
				goto out;
			}
		}
#endif
	}

out:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL(xen_alloc_unpopulated_pages);

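
/*
 * Illustrative only: a typical caller pairs the two helpers as in the
 * hypothetical snippet below (error handling trimmed). "count" and
 * "pages" are placeholder names, not part of this API.
 *
 *	struct page *pages[16];
 *	unsigned int count = ARRAY_SIZE(pages);
 *
 *	if (xen_alloc_unpopulated_pages(count, pages))
 *		return -ENOMEM;
 *	... map foreign frames into the pages, use them, unmap ...
 *	xen_free_unpopulated_pages(count, pages);
 */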
/**
 * xen_free_unpopulated_pages - return unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;

	mutex_lock(&list_lock);
	for (i = 0; i < nr_pages; i++) {
		pages[i]->zone_device_data = page_list;
		page_list = pages[i];
		list_count++;
	}
	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);

#ifdef CONFIG_XEN_PV
static int __init init(void)
{
	unsigned int i;

	if (!xen_domain())
		return -ENODEV;

	if (!xen_pv_domain())
		return 0;

	/*
	 * Initialize with pages from the extra memory regions (see
	 * arch/x86/xen/setup.c).
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		unsigned int j;

		for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
			struct page *pg =
				pfn_to_page(xen_extra_mem[i].start_pfn + j);

			pg->zone_device_data = page_list;
			page_list = pg;
			list_count++;
		}
	}

	return 0;
}
subsys_initcall(init);
#endif