// SPDX-License-Identifier: GPL-2.0

/******************************************************************************
 * Xen memory reservation utilities.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <asm/xen/hypercall.h>

#include <xen/interface/memory.h>
#include <xen/mem-reservation.h>
#include <linux/moduleparam.h>

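/*
 * When enabled, pages are scrubbed before being handed back to the
 * hypervisor so their previous contents cannot leak to other domains.
 * The default comes from CONFIG_XEN_SCRUB_PAGES_DEFAULT and can be
 * overridden on the kernel command line via the "xen_scrub_pages"
 * core parameter.
 */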
bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);

/*
 * Use one extent per PAGE_SIZE to avoid breaking down the page into
 * multiple frames.
 */
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)

#ifdef CONFIG_XEN_HAVE_PVMMU
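/**
 * __xenmem_reservation_va_mapping_update - restore P2M and VA mappings
 * @count: number of entries in @pages and @frames
 * @pages: pages whose backing machine frames have just been populated
 * @frames: machine frames returned by XENMEM_populate_physmap
 *
 * A PV guest manages its own page tables, so after a reservation
 * increase the physical-to-machine entries and the kernel linear
 * mapping must be updated by hand to point at the new frames.
 */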
void __xenmem_reservation_va_mapping_update(unsigned long count,
                                            struct page **pages,
                                            xen_pfn_t *frames)
{
        int i;

        for (i = 0; i < count; i++) {
                struct page *page = pages[i];
                unsigned long pfn;

                /* Check the page pointer before page_to_pfn() touches it. */
                BUG_ON(!page);
                pfn = page_to_pfn(page);

                /*
                 * We don't support PV MMU when Linux and Xen are using
                 * different page granularity.
                 */
                BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

                set_phys_to_machine(pfn, frames[i]);

                /* Link back into the page tables if not highmem. */
                if (!PageHighMem(page)) {
                        int ret;

                        ret = HYPERVISOR_update_va_mapping(
                                        (unsigned long)__va(pfn << PAGE_SHIFT),
                                        mfn_pte(frames[i], PAGE_KERNEL),
                                        0);
                        BUG_ON(ret);
                }
        }
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update);

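/**
 * __xenmem_reservation_va_mapping_reset - tear down P2M and VA mappings
 * @count: number of entries in @pages
 * @pages: pages about to be returned to the hypervisor
 *
 * The inverse of __xenmem_reservation_va_mapping_update(): the kernel
 * linear mapping is cleared and the P2M entries are invalidated before
 * the underlying frames are handed back via XENMEM_decrease_reservation.
 */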
void __xenmem_reservation_va_mapping_reset(unsigned long count,
                                           struct page **pages)
{
        int i;

        for (i = 0; i < count; i++) {
                struct page *page = pages[i];
                unsigned long pfn = page_to_pfn(page);

                /*
                 * We don't support PV MMU when Linux and Xen are using
                 * different page granularity.
                 */
                BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

                if (!PageHighMem(page)) {
                        int ret;

                        ret = HYPERVISOR_update_va_mapping(
                                        (unsigned long)__va(pfn << PAGE_SHIFT),
                                        __pte_ma(0), 0);
                        BUG_ON(ret);
                }
                __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
        }
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_reset);
#endif /* CONFIG_XEN_HAVE_PVMMU */

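/**
 * xenmem_reservation_increase - ask Xen to populate frames for @count pages
 * @count: number of entries in @frames
 * @frames: array of PFNs (based on Xen page granularity)
 *
 * Returns the number of extents actually populated, which may be less
 * than @count, or a negative error code.
 */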
int xenmem_reservation_increase(int count, xen_pfn_t *frames)
{
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
                .extent_order = EXTENT_ORDER,
                .domid = DOMID_SELF
        };

        /* XENMEM_populate_physmap requires a PFN based on Xen granularity. */
        set_xen_guest_handle(reservation.extent_start, frames);
        reservation.nr_extents = count;
        return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_increase);

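/**
 * xenmem_reservation_decrease - hand frames for @count pages back to Xen
 * @count: number of entries in @frames
 * @frames: array of GFNs (based on Xen page granularity)
 *
 * Returns the number of extents actually released, which may be less
 * than @count, or a negative error code.
 */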
int xenmem_reservation_decrease(int count, xen_pfn_t *frames)
{
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
                .extent_order = EXTENT_ORDER,
                .domid = DOMID_SELF
        };

        /* XENMEM_decrease_reservation requires a GFN */
        set_xen_guest_handle(reservation.extent_start, frames);
        reservation.nr_extents = count;
        return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_decrease);
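
/*
 * A minimal sketch of how a caller (for example a balloon-style driver)
 * might pair these helpers, using the non-underscore wrappers from
 * <xen/mem-reservation.h>. The variables below (N, pages, frames, ret)
 * are illustrative only, not part of this file:
 *
 *	xen_pfn_t frames[N];
 *
 *	// Give N pages back to Xen: scrub, unmap, then decrease.
 *	for (i = 0; i < N; i++) {
 *		xenmem_reservation_scrub_page(pages[i]);
 *		frames[i] = xen_page_to_gfn(pages[i]);
 *	}
 *	xenmem_reservation_va_mapping_reset(N, pages);
 *	ret = xenmem_reservation_decrease(N, frames);
 *
 *	// Later, reclaim backing frames for the same pages.
 *	for (i = 0; i < N; i++)
 *		frames[i] = page_to_xen_pfn(pages[i]);
 *	ret = xenmem_reservation_increase(N, frames);
 *	xenmem_reservation_va_mapping_update(N, pages, frames);
 */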