// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CoProcessor (SPU/AFU) mm fault handler
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 * Author: Jeremy Kerr <jk@ozlabs.org>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <asm/reg.h>
#include <asm/copro.h>
#include <asm/spu.h>
#include <misc/cxl-base.h>

/*
 * This ought to be kept in sync with the powerpc specific do_page_fault
 * function. Fortunately, there are a few corner cases that we have not
 * had to handle here so far.
 */
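/*
 * Resolve a coprocessor fault at effective address @ea for @mm: look up
 * the VMA under mmap_read_lock(), check the access against the fault
 * bits in @dsisr, and hand the fault to handle_mm_fault().  The raw
 * VM_FAULT_* code is returned through @flt; the function itself returns
 * 0 on success, or -EFAULT/-ENOMEM on failure.
 */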
int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
		unsigned long dsisr, vm_fault_t *flt)
{
	struct vm_area_struct *vma;
	unsigned long is_write;
	int ret;

	if (mm == NULL)
		return -EFAULT;

	if (mm->pgd == NULL)
		return -EFAULT;

	mmap_read_lock(mm);
	ret = -EFAULT;
	vma = find_vma(mm, ea);
	if (!vma)
		goto out_unlock;

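	/*
	 * A fault below the start of the VMA is only legitimate for a
	 * stack segment that may still grow downwards.
	 */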
	if (ea < vma->vm_start) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_unlock;
		if (expand_stack(vma, ea))
			goto out_unlock;
	}

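	/* DSISR_ISSTORE is set when the faulting access was a store. */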
	is_write = dsisr & DSISR_ISSTORE;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto out_unlock;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto out_unlock;
		/*
		 * PROT_NONE is covered by the VMA check above,
		 * and hash should get a NOHPTE fault instead of
		 * a PROTFAULT in case fixup is needed for things
		 * like autonuma.
		 */
		if (!radix_enabled())
			WARN_ON_ONCE(dsisr & DSISR_PROTFAULT);
	}

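	/* Permissions check out; let the generic fault handler do the work. */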
	ret = 0;
	*flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0, NULL);
	if (unlikely(*flt & VM_FAULT_ERROR)) {
		if (*flt & VM_FAULT_OOM) {
			ret = -ENOMEM;
			goto out_unlock;
		} else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
			ret = -EFAULT;
			goto out_unlock;
		}
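		/* Any other VM_FAULT_ERROR bit is unexpected here. */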
		BUG();
	}

out_unlock:
	mmap_read_unlock(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(copro_handle_mm_fault);

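/*
 * Typical caller pattern, sketched after the way the spufs and cxl
 * fault paths drive copro_handle_mm_fault() (the surrounding variable
 * names are illustrative, not taken from this file):
 *
 *	vm_fault_t flt = 0;
 *	int ret = copro_handle_mm_fault(mm, ea, dsisr, &flt);
 *	if (!ret && (flt & VM_FAULT_MAJOR))
 *		current->maj_flt++;
 */

/*
 * Compute the SLB entry (ESID/VSID pair) that a coprocessor needs in
 * order to access the effective address @ea in the context of @mm.
 * Fills in @slb and returns 0 on success, or 1 if @ea does not map to
 * a valid segment.
 */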
int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
{
	u64 vsid, vsidkey;
	int psize, ssize;

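	/*
	 * The page size, segment size and VSID depend on which address
	 * region @ea falls in.
	 */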
	switch (get_region_id(ea)) {
	case USER_REGION_ID:
		pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
		if (mm == NULL)
			return 1;
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_user_vsid(&mm->context, ea, ssize);
		vsidkey = SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
		psize = mmu_vmalloc_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case IO_REGION_ID:
		pr_devel("%s: 0x%llx -- IO_REGION_ID\n", __func__, ea);
		psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case LINEAR_MAP_REGION_ID:
		pr_devel("%s: 0x%llx -- LINEAR_MAP_REGION_ID\n", __func__, ea);
		psize = mmu_linear_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	default:
		pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
		return 1;
	}
	/* Bad address */
	if (!vsid)
		return 1;

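	/*
	 * Assemble the VSID word: the raw VSID shifted into place, the
	 * user/kernel flags held in vsidkey, the page-size encoding
	 * (SLLP) and, for 1T segments, the segment-size bit.
	 */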
	vsid = (vsid << slb_vsid_shift(ssize)) | vsidkey;

	vsid |= mmu_psize_defs[psize].sllp |
		((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);

	slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V;
	slb->vsid = vsid;

	return 0;
}
EXPORT_SYMBOL_GPL(copro_calculate_slb);

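/*
 * Invalidate the SLB entries cached by every coprocessor attached to
 * @mm: all SPUs (when SPU support is built in) and all cxl contexts.
 */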
void copro_flush_all_slbs(struct mm_struct *mm)
{
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
	cxl_slbia(mm);
}
EXPORT_SYMBOL_GPL(copro_flush_all_slbs);