xref: /OK3568_Linux_fs/kernel/arch/csky/abiv1/cacheflush.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/syscalls.h>
#include <linux/spinlock.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>

#define PG_dcache_clean		PG_arch_1

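/*
 * abiv1 performs cache maintenance on the whole cache (dcache_wbinv_all()
 * and icache_inv_all()) rather than per page or per address range.
 * PG_dcache_clean tracks whether a page's data is known to be clean in the
 * dcache: flush_dcache_page() either clears the bit so the flush is
 * deferred to update_mmu_cache(), or flushes right away and sets the bit.
 */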
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping_file(page);

	if (mapping && !page_mapcount(page))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		dcache_wbinv_all();
		if (mapping)
			icache_inv_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

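/*
 * Called after a PTE for @addr has been set up.  If flush_dcache_page()
 * deferred the flush (PG_dcache_clean still clear), write back and
 * invalidate the dcache now; executable mappings of file-backed pages
 * also get the icache invalidated.
 */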
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		dcache_wbinv_all();

	if (page_mapping_file(page)) {
		if (vma->vm_flags & VM_EXEC)
			icache_inv_all();
	}
}

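/*
 * Called after the kernel has written to a page cache page through its
 * kernel mapping.  Flush the dcache unless the page belongs to a file
 * mapping that currently has no user-space mappings.
 */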
void flush_kernel_dcache_page(struct page *page)
{
	struct address_space *mapping;

	mapping = page_mapping_file(page);

	if (!mapping || mapping_mapped(mapping))
		dcache_wbinv_all();
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

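/*
 * flush_cache_range(): @start and @end are ignored; the whole dcache is
 * written back and invalidated, and the icache is invalidated as well
 * when the VMA is executable.
 */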
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	dcache_wbinv_all();

	if (vma->vm_flags & VM_EXEC)
		icache_inv_all();
}