/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Xen memory reservation utilities.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#ifndef _XENMEM_RESERVATION_H
#define _XENMEM_RESERVATION_H

#include <linux/highmem.h>

#include <xen/features.h>
#include <xen/page.h>

extern bool xen_scrub_pages;
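
/*
 * Scrub a page before it is handed back to the hypervisor, but only when
 * scrubbing was requested via xen_scrub_pages, so that guest data does not
 * leak to whichever domain receives the frame next.
 */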
static inline void xenmem_reservation_scrub_page(struct page *page)
{
	if (xen_scrub_pages)
		clear_highpage(page);
}

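/*
 * PV MMU guests must maintain their own virtual mappings and
 * physical-to-machine translations when frames come and go; these batch
 * helpers, implemented in drivers/xen/mem-reservation.c, do that work
 * for the wrappers below.
 */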
#ifdef CONFIG_XEN_HAVE_PVMMU
void __xenmem_reservation_va_mapping_update(unsigned long count,
					    struct page **pages,
					    xen_pfn_t *frames);

void __xenmem_reservation_va_mapping_reset(unsigned long count,
					   struct page **pages);
#endif

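/*
 * Remap @count pages that were just populated with the machine frames
 * in @frames. A no-op for auto-translated (HVM/PVH) guests, where the
 * hypervisor maintains the translation itself.
 */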
static inline void xenmem_reservation_va_mapping_update(unsigned long count,
							struct page **pages,
							xen_pfn_t *frames)
{
#ifdef CONFIG_XEN_HAVE_PVMMU
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		__xenmem_reservation_va_mapping_update(count, pages, frames);
#endif
}

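/*
 * Tear down the mappings of @count pages ahead of releasing their frames
 * back to the hypervisor. Likewise a no-op for auto-translated guests.
 */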
static inline void xenmem_reservation_va_mapping_reset(unsigned long count,
						       struct page **pages)
{
#ifdef CONFIG_XEN_HAVE_PVMMU
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		__xenmem_reservation_va_mapping_reset(count, pages);
#endif
}

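/*
 * Ask Xen to populate (increase) or release (decrease) @count frames,
 * listed in @frames. Both return the number of frames actually handled,
 * which may be fewer than requested.
 *
 * A minimal sketch of the usual sequence for a single page, modelled on
 * the balloon driver (error handling omitted):
 *
 *	xen_pfn_t frame = page_to_xen_pfn(page);
 *
 *	Release the page to Xen:
 *		xenmem_reservation_scrub_page(page);
 *		xenmem_reservation_va_mapping_reset(1, &page);
 *		xenmem_reservation_decrease(1, &frame);
 *
 *	Repopulate it later:
 *		xenmem_reservation_increase(1, &frame);
 *		xenmem_reservation_va_mapping_update(1, &page, &frame);
 */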
int xenmem_reservation_increase(int count, xen_pfn_t *frames);

int xenmem_reservation_decrease(int count, xen_pfn_t *frames);

#endif