// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/kmap.c
 *
 *  Copyright (C) 1997 Roman Hodek
 *
 *  10/01/99 cleaned up the code and changed to the same interface
 *	     used by other architectures		/Roman Zippel
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

#undef DEBUG

/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix this with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */
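/*
 * In practice (an explanatory note only, not something the code below
 * relies on): on 040/060 an ioremap()ed range is carved out of the generic
 * vmalloc area with PAGE_SIZE granularity, while on 020/030 every mapping
 * claims at least one IO_SIZE (PMD_SIZE) slot in the private
 * KMAP_START..KMAP_END window managed by get_io_area() below.
 */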

#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE		PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
	return get_vm_area(size, VM_IOREMAP);
}


static inline void free_io_area(void *addr)
{
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE		PMD_SIZE

static struct vm_struct *iolist;

/*
 * __free_io_area unmaps nearly everything, so be careful
 * Currently it doesn't free pointer/page tables anymore but this
 * wasn't used anyway and might be added later.
 */
static void __free_io_area(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iounmap: bad pud(%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			int pmd_type = pmd_val(*pmd_dir) & _DESCTYPE_MASK;

			if (pmd_type == _PAGE_PRESENT) {
				pmd_clear(pmd_dir);
				virtaddr += PMD_SIZE;
				size -= PMD_SIZE;

			} else if (pmd_type == 0)
				continue;
		}
#endif

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}

static struct vm_struct *get_io_area(unsigned long size)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	addr = KMAP_START;
	for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
		if (size + addr < (unsigned long)tmp->addr)
			break;
		if (addr > KMAP_END-size) {
			kfree(area);
			return NULL;
		}
		addr = tmp->size + (unsigned long)tmp->addr;
	}
	area->addr = (void *)addr;
	area->size = size + IO_SIZE;
	area->next = *p;
	*p = area;
	return area;
}

static inline void free_io_area(void *addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	addr = (void *)((unsigned long)addr & -IO_SIZE);
	for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			/* remove gap added in get_io_area() */
			__free_io_area(tmp->addr, tmp->size - IO_SIZE);
			kfree(tmp);
			return;
		}
	}
}

#endif

/*
 * Map some physical address range into the kernel address space.
 */
/* Rewritten by Andreas Schwab to remove all races. */
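/*
 * Hedged usage sketch only -- the base address and length below are made-up
 * placeholders, not values taken from any real board:
 *
 *	void __iomem *regs;
 *
 *	regs = __ioremap(example_phys_base, example_len, IOMAP_NOCACHE_SER);
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 *
 * Most callers reach this through the ioremap() wrappers; on any failure
 * the function returns NULL.
 */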

void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || physaddr > (unsigned long)(-size))
		return NULL;

#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void __iomem *)physaddr;
	}
#endif
#ifdef CONFIG_COLDFIRE
	if (__cf_internalio(physaddr))
		return (void __iomem *) physaddr;
#endif

#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
	/*
	 * Mappings have to be aligned
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;

	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
			     _PAGE_DIRTY | _PAGE_READWRITE);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}

	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE-1)))
			printk("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pud_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			pmd_val(*pmd_dir) = physaddr;
			physaddr += PMD_SIZE;
			virtaddr += PMD_SIZE;
			size -= PMD_SIZE;
		} else
#endif
		{
			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();

	return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);

/*
 * Unmap an ioremap()ed region again
 */
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
	if ((!MACH_IS_AMIGA) ||
	    (((unsigned long)addr < 0x40000000) ||
	     ((unsigned long)addr > 0x60000000)))
		free_io_area((__force void *)addr);
#else
#ifdef CONFIG_COLDFIRE
	if (cf_internalio(addr))
		return;
#endif
	free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
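/*
 * Hedged usage sketch (buf and len are illustrative assumptions, nothing
 * in this file provides them):
 *
 *	kernel_set_cachemode(buf, len, IOMAP_NOCACHE_SER);
 *	... uncached access to buf ...
 *	kernel_set_cachemode(buf, len, IOMAP_FULL_CACHING);
 *
 * Per the comment above, any cached data for [buf, buf + len) must be
 * pushed out by the caller before the mode change.  The walk below steps
 * in PAGE_SIZE units (PMD_SIZE for 020/030 early termination descriptors),
 * so callers normally pass suitably aligned ranges.
 */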
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
		}
	}

	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (pud_bad(*pud_dir)) {
			printk("iocachemode: bad pud(%08lx)\n", pud_val(*pud_dir));
			pud_clear(pud_dir);
			return;
		}
		pmd_dir = pmd_offset(pud_dir, virtaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (CPU_IS_020_OR_030) {
			unsigned long pmd = pmd_val(*pmd_dir);

			if ((pmd & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				*pmd_dir = __pmd((pmd & _CACHEMASK040) | cmode);
				virtaddr += PMD_SIZE;
				size -= PMD_SIZE;
				continue;
			}
		}
#endif

		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);