/*
 * arch/xtensa/mm/cache.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2006 Tensilica Inc.
 *
 * Chris Zankel	<chris@zankel.net>
 * Joe Taylor
 * Marc Gauthier
 *
 */

#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pgtable.h>

#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
/*
 * Note:
 * The kernel provides one architecture bit PG_arch_1 in the page flags that
 * can be used for cache coherency.
 *
 * I$-D$ coherency.
 *
 * The Xtensa architecture doesn't keep the instruction cache coherent with
 * the data cache. We use the architecture bit to indicate if the caches
 * are coherent. The kernel clears this bit whenever a page is added to the
 * page cache. At that time, the caches might not be in sync. We therefore
 * define this flag as 'clean' if set.
 *
 * D-cache aliasing.
 *
 * With cache aliasing, we always have to flush the cache when pages are
 * unmapped (see tlb_start_vma()). So we use this flag to indicate a dirty
 * page.
 */
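
/*
 * Rough lifecycle of PG_arch_1 as used in this file (illustrative sketch):
 *
 *   page enters the page cache        -> core mm clears PG_arch_1
 *   clear/copy_user_highpage(),
 *   flush_dcache_page() (not mapped)  -> set PG_arch_1: caches may be stale
 *   update_mmu_cache()                -> with D-cache aliasing: flush the
 *                                        aliases and clear the bit;
 *                                        without aliasing: sync I$/D$ for
 *                                        executable pages and set the bit
 *                                        ('clean')
 */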

#if (DCACHE_WAY_SIZE > PAGE_SIZE)
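/*
 * Invalidate the D-cache lines that the kernel's own mapping of @page may
 * hold when that mapping has a different cache colour than the user address
 * @vaddr, so that a later write-back of stale lines cannot clobber data
 * written through a colour-matched temporary mapping. Highmem pages are
 * handled through a temporary alias at TLBTEMP_BASE_1; if the colours
 * already match, there is nothing to do.
 */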
static inline void kmap_invalidate_coherent(struct page *page,
					    unsigned long vaddr)
{
	if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		unsigned long kvaddr;

		if (!PageHighMem(page)) {
			kvaddr = (unsigned long)page_to_virt(page);

			__invalidate_dcache_page(kvaddr);
		} else {
			kvaddr = TLBTEMP_BASE_1 +
				(page_to_phys(page) & DCACHE_ALIAS_MASK);

			preempt_disable();
			__invalidate_dcache_page_alias(kvaddr,
						       page_to_phys(page));
			preempt_enable();
		}
	}
}

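/*
 * Return a kernel virtual address through which @page can be accessed with
 * the same cache colour as the user address @vaddr. If the kernel's own
 * mapping already has the right colour, use it directly and report *paddr
 * as zero; otherwise hand back a temporary alias address based on @base and
 * the physical address that clear_page_alias()/copy_page_alias() must map
 * there.
 */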
static inline void *coherent_kvaddr(struct page *page, unsigned long base,
				    unsigned long vaddr, unsigned long *paddr)
{
	if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		*paddr = page_to_phys(page);
		return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
	} else {
		*paddr = 0;
		return page_to_virt(page);
	}
}

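/*
 * Clear a page on behalf of user space. The page is cleared through a
 * colour-matched mapping so the user mapping sees the result, and PG_arch_1
 * is set so that update_mmu_cache() knows the aliases still need flushing.
 */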
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	unsigned long paddr;
	void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);

	preempt_disable();
	kmap_invalidate_coherent(page, vaddr);
	set_bit(PG_arch_1, &page->flags);
	clear_page_alias(kvaddr, paddr);
	preempt_enable();
}
EXPORT_SYMBOL(clear_user_highpage);

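/*
 * Copy a page on behalf of user space (e.g. for copy-on-write). Source and
 * destination are accessed through two separate colour-matched temporary
 * mappings so that neither access goes through a mismatched alias.
 */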
void copy_user_highpage(struct page *dst, struct page *src,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long dst_paddr, src_paddr;
	void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
					  &dst_paddr);
	void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
					  &src_paddr);

	preempt_disable();
	kmap_invalidate_coherent(dst, vaddr);
	set_bit(PG_arch_1, &dst->flags);
	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_highpage);

/*
 * This routine is called any time the kernel writes to a user page cache
 * page, or is about to read from one.
 */

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);

	/*
	 * If we have a mapping but the page is not mapped to user-space
	 * yet, we simply mark this page dirty and defer flushing the
	 * caches until update_mmu_cache().
	 */

	if (mapping && !mapping_mapped(mapping)) {
		if (!test_bit(PG_arch_1, &page->flags))
			set_bit(PG_arch_1, &page->flags);
		return;

	} else {

		unsigned long phys = page_to_phys(page);
		unsigned long temp = page->index << PAGE_SHIFT;
		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
		unsigned long virt;

		/*
		 * Flush the page in kernel space and user space.
		 * Note that we can omit that step if aliasing is not
		 * an issue, but we do have to synchronize I$ and D$
		 * if we have a mapping.
		 */

		if (!alias && !mapping)
			return;

		preempt_disable();
		virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(virt, phys);

		virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

		if (alias)
			__flush_invalidate_dcache_page_alias(virt, phys);

		if (mapping)
			__invalidate_icache_page_alias(virt, phys);
		preempt_enable();
	}

	/* There shouldn't be an entry in the cache for this page anymore. */
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * For now, flush the whole cache. FIXME??
 */

void local_flush_cache_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	__flush_invalidate_dcache_all();
	__invalidate_icache_all();
}
EXPORT_SYMBOL(local_flush_cache_range);

/*
 * Remove any entry in the cache for this page.
 *
 * Note that this function is only called for user pages, so use the
 * alias versions of the cache flush functions.
 */

void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
			    unsigned long pfn)
{
	/* Note that we have to use the 'alias' address to avoid multi-hit */

	unsigned long phys = page_to_phys(pfn_to_page(pfn));
	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

	preempt_disable();
	__flush_invalidate_dcache_page_alias(virt, phys);
	__invalidate_icache_page_alias(virt, phys);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_cache_page);

#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */

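/*
 * Called by the generic mm code after a PTE for @addr has been installed.
 * Finish any deferred cache maintenance for the page: with D-cache aliasing,
 * flush the kernel and user aliases of a page marked by PG_arch_1; without
 * aliasing, only I$/D$ synchronisation for executable pages is needed.
 */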
void
update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	/* Invalidate old entry in TLBs */

	flush_tlb_page(vma, addr);

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
		unsigned long phys = page_to_phys(page);
		unsigned long tmp;

		preempt_disable();
		tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		__invalidate_icache_page_alias(tmp, phys);
		preempt_enable();

		clear_bit(PG_arch_1, &page->flags);
	}
#else
	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
	    && (vma->vm_flags & VM_EXEC) != 0) {
		unsigned long paddr = (unsigned long)kmap_atomic(page);
		__flush_dcache_page(paddr);
		__invalidate_icache_page(paddr);
		set_bit(PG_arch_1, &page->flags);
		kunmap_atomic((void *)paddr);
	}
#endif
}

/*
 * access_process_vm() has called get_user_pages(), which has done a
 * flush_dcache_page() on the page.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

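/*
 * Copy data into a user page (e.g. on behalf of ptrace). Flush the user
 * alias before and after the copy when it differs from the kernel alias,
 * and keep the I-cache coherent for executable mappings.
 */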
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/* Flush and invalidate user page if aliased. */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		preempt_disable();
		__flush_invalidate_dcache_page_alias(t, phys);
		preempt_enable();
	}

	/* Copy data */

	memcpy(dst, src, len);

	/*
	 * Flush and invalidate kernel page if aliased and synchronize
	 * data and instruction caches for executable pages.
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		preempt_disable();
		__flush_invalidate_dcache_range((unsigned long) dst, len);
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_alias(t, phys);
		preempt_enable();

	} else if ((vma->vm_flags & VM_EXEC) != 0) {
		__flush_dcache_range((unsigned long) dst, len);
		__invalidate_icache_range((unsigned long) dst, len);
	}
}

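/*
 * Copy data out of a user page. Only the user alias needs flushing before
 * the read so the kernel sees the user's most recent stores; no I-cache
 * maintenance is required for a read.
 */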
extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
				unsigned long vaddr, void *dst, const void *src,
				unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/*
	 * Flush user page if aliased.
	 * (Note: a simple flush would be sufficient)
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		preempt_disable();
		__flush_invalidate_dcache_page_alias(t, phys);
		preempt_enable();
	}

	memcpy(dst, src, len);
}

#endif