/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68KNOMMU_CACHEFLUSH_H
#define _M68KNOMMU_CACHEFLUSH_H

/*
 * (C) Copyright 2000-2010, Greg Ungerer <gerg@snapgear.com>
 */
#include <linux/mm.h>
#include <asm/mcfsim.h>

#define flush_cache_all()		__flush_cache_all()
#define flush_dcache_range(start, len)	__flush_dcache_all()
#define flush_icache_range(start, len)	__flush_icache_all()

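/*
 * A minimal usage sketch (hypothetical caller, not part of this header):
 * after copying executable code into RAM, push any dirty data and then
 * invalidate the instruction cache before jumping to it:
 *
 *	memcpy(dst, src, len);
 *	flush_dcache_range((unsigned long)dst, len);
 *	flush_icache_range((unsigned long)dst, len);
 *
 * Note that on this nommu port the start/len arguments are ignored and
 * the whole cache is operated on each time.
 */
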
void mcf_cache_push(void);

static inline void __clear_cache_all(void)
{
#ifdef CACHE_INVALIDATE
	__asm__ __volatile__ (
		"movec	%0, %%CACR\n\t"
		"nop\n\t"
		: : "r" (CACHE_INVALIDATE) );
#endif
}
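
/*
 * CACHE_INVALIDATE, when defined, is supplied by the part-specific
 * support header pulled in through <asm/mcfsim.h>. As an illustrative
 * sketch only (the exact CACR bit names vary between ColdFire parts),
 * such a definition combines the normal cache mode bits with an
 * invalidate-all bit, e.g.:
 *
 *	#define CACHE_INVALIDATE	(CACHE_MODE | CACR_CINV)
 *
 * so that the movec to the CACR above re-enables the cache with every
 * line marked invalid. The trailing nop synchronizes the pipeline.
 */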

static inline void __flush_cache_all(void)
{
#ifdef CACHE_PUSH
	mcf_cache_push();
#endif
	__clear_cache_all();
}

/*
 * Some ColdFire parts implement separate instruction and data caches;
 * on those we should just flush the appropriate cache. If we don't need
 * to do any specific flushing then this will be optimized away.
 */
static inline void __flush_icache_all(void)
{
#ifdef CACHE_INVALIDATEI
	__asm__ __volatile__ (
		"movec	%0, %%CACR\n\t"
		"nop\n\t"
		: : "r" (CACHE_INVALIDATEI) );
#endif
}

static inline void __flush_dcache_all(void)
{
#ifdef CACHE_PUSH
	mcf_cache_push();
#endif
#ifdef CACHE_INVALIDATED
	__asm__ __volatile__ (
		"movec	%0, %%CACR\n\t"
		"nop\n\t"
		: : "r" (CACHE_INVALIDATED) );
#else
	/* Flush the write buffer */
	__asm__ __volatile__ ( "nop" );
#endif
}

/*
 * Push cache entries at supplied address. We want to write back any dirty
 * data and then invalidate the cache lines associated with this address.
 * Note that this implementation ignores paddr and len, and simply pushes
 * and invalidates the entire cache.
 */
static inline void cache_push(unsigned long paddr, int len)
{
	__flush_cache_all();
}
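
/*
 * A minimal usage sketch (hypothetical caller, not part of this header):
 * write back a buffer before a device reads it via DMA. Since paddr and
 * len are ignored here, the entire cache gets pushed:
 *
 *	cache_push(virt_to_phys(buf), size);
 */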

/*
 * Clear cache entries at supplied address (that is, don't write back any
 * dirty data). As with cache_push(), paddr and len are ignored and the
 * whole cache is invalidated.
 */
static inline void cache_clear(unsigned long paddr, int len)
{
	__clear_cache_all();
}
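
/*
 * A minimal usage sketch (hypothetical caller, not part of this header):
 * after a device has DMA-written into a buffer, discard any stale cache
 * lines so the CPU reads the fresh data from memory:
 *
 *	cache_clear(virt_to_phys(buf), size);
 */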

#include <asm-generic/cacheflush.h>

#endif /* _M68KNOMMU_CACHEFLUSH_H */