xref: /OK3568_Linux_fs/kernel/arch/x86/xen/mmu_hvm.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun #include <linux/types.h>
3*4882a593Smuzhiyun #include <linux/crash_dump.h>
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #include <xen/interface/xen.h>
6*4882a593Smuzhiyun #include <xen/hvm.h>
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include "mmu.h"
9*4882a593Smuzhiyun 
#ifdef CONFIG_PROC_VMCORE
/*
 * This function is used in two contexts:
 * - the kdump kernel has to check whether a pfn of the crashed kernel
 *   was a ballooned page. vmcore is using this function to decide
 *   whether to access a pfn of the crashed kernel.
 * - the kexec kernel has to check whether a pfn was ballooned by the
 *   previous kernel. If the pfn is ballooned, handle it properly.
 * Returns 0 if the pfn is not backed by a RAM page, the caller may
 * handle the pfn special in this case.
 */
static int xen_oldmem_pfn_is_ram(unsigned long pfn)
{
	struct xen_hvm_get_mem_type a = {
		.domid = DOMID_SELF,
		.pfn = pfn,
	};

	/* Ask the hypervisor what backs this guest pfn. */
	if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
		return -ENXIO;

	/*
	 * Only device-model emulated MMIO space is known to have no RAM
	 * behind it; every other type (ram_rw, ram_ro, ...) is treated
	 * as RAM-backed, matching the original switch's default case.
	 */
	return a.mem_type != HVMMEM_mmio_dm;
}
#endif
46*4882a593Smuzhiyun 
xen_hvm_exit_mmap(struct mm_struct * mm)47*4882a593Smuzhiyun static void xen_hvm_exit_mmap(struct mm_struct *mm)
48*4882a593Smuzhiyun {
49*4882a593Smuzhiyun 	struct xen_hvm_pagetable_dying a;
50*4882a593Smuzhiyun 	int rc;
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun 	a.domid = DOMID_SELF;
53*4882a593Smuzhiyun 	a.gpa = __pa(mm->pgd);
54*4882a593Smuzhiyun 	rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
55*4882a593Smuzhiyun 	WARN_ON_ONCE(rc < 0);
56*4882a593Smuzhiyun }
57*4882a593Smuzhiyun 
is_pagetable_dying_supported(void)58*4882a593Smuzhiyun static int is_pagetable_dying_supported(void)
59*4882a593Smuzhiyun {
60*4882a593Smuzhiyun 	struct xen_hvm_pagetable_dying a;
61*4882a593Smuzhiyun 	int rc = 0;
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun 	a.domid = DOMID_SELF;
64*4882a593Smuzhiyun 	a.gpa = 0x00;
65*4882a593Smuzhiyun 	rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
66*4882a593Smuzhiyun 	if (rc < 0) {
67*4882a593Smuzhiyun 		printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
68*4882a593Smuzhiyun 		return 0;
69*4882a593Smuzhiyun 	}
70*4882a593Smuzhiyun 	return 1;
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun 
/* Install the Xen HVM mmu hooks at boot. */
void __init xen_hvm_init_mmu_ops(void)
{
	/* Only patch exit_mmap if the hypervisor supports the hypercall. */
	if (is_pagetable_dying_supported())
		pv_ops.mmu.exit_mmap = xen_hvm_exit_mmap;
#ifdef CONFIG_PROC_VMCORE
	/* Let vmcore ask whether a pfn of the old kernel was RAM-backed. */
	WARN_ON(register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram));
#endif
}
81