/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994-1996 Linus Torvalds & authors
 *
 * Copied from i386; many of the especially older MIPS or ISA-based platforms
 * are basically identical. Using this file probably implies i8259 PIC
 * support in a system, but at the very least interrupt numbers 0 - 15 need
 * to be put aside for legacy devices.
 */
#ifndef __ASM_MACH_GENERIC_IDE_H
#define __ASM_MACH_GENERIC_IDE_H

#ifdef __KERNEL__

#include <linux/pci.h>
#include <linux/stddef.h>
#include <asm/processor.h>

/* MIPS port and memory-mapped I/O string operations. */
static inline void __ide_flush_prologue(void)
{
#ifdef CONFIG_SMP
	if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
		preempt_disable();
#endif
}

static inline void __ide_flush_epilogue(void)
{
#ifdef CONFIG_SMP
	if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
		preempt_enable();
#endif
}

static inline void __ide_flush_dcache_range(unsigned long addr, unsigned long size)
{
	if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) {
		unsigned long end = addr + size;

		while (addr < end) {
			local_flush_data_cache_page((void *)addr);
			addr += PAGE_SIZE;
		}
	}
}

/*
 * insw() and friends might be called with interrupts disabled, so we can't
 * send IPIs for flushing because of the potential for deadlocks; see the
 * comment above smp_call_function() in arch/mips/kernel/smp.c.  We work
 * around the problem by disabling preemption, so we know the flush is
 * actually performed on the processor that holds the lines to be flushed,
 * which is hopefully even better for performance anyway.
 */
static inline void __ide_insw(unsigned long port, void *addr,
	unsigned int count)
{
	__ide_flush_prologue();
	insw(port, addr, count);
	__ide_flush_dcache_range((unsigned long)addr, count * 2);
	__ide_flush_epilogue();
}

static inline void __ide_insl(unsigned long port, void *addr, unsigned int count)
{
	__ide_flush_prologue();
	insl(port, addr, count);
	__ide_flush_dcache_range((unsigned long)addr, count * 4);
	__ide_flush_epilogue();
}

static inline void __ide_outsw(unsigned long port, const void *addr,
	unsigned long count)
{
	__ide_flush_prologue();
	outsw(port, addr, count);
	__ide_flush_dcache_range((unsigned long)addr, count * 2);
	__ide_flush_epilogue();
}

static inline void __ide_outsl(unsigned long port, const void *addr,
	unsigned long count)
{
	__ide_flush_prologue();
	outsl(port, addr, count);
	__ide_flush_dcache_range((unsigned long)addr, count * 4);
	__ide_flush_epilogue();
}

static inline void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
{
	__ide_flush_prologue();
	readsw(port, addr, count);
	__ide_flush_dcache_range((unsigned long)addr, count * 2);
	__ide_flush_epilogue();
}

static inline void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
{
	__ide_flush_prologue();
	readsl(port, addr, count);
	__ide_flush_dcache_range((unsigned long)addr, count * 4);
	__ide_flush_epilogue();
}

static inline void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
{
	__ide_flush_prologue();
	writesw(port, addr, count);
	__ide_flush_dcache_range((unsigned long)addr, count * 2);
	__ide_flush_epilogue();
}

static inline void __ide_mm_outsl(void __iomem *port, void *addr, u32 count)
{
	__ide_flush_prologue();
	writesl(port, addr, count);
	__ide_flush_dcache_range((unsigned long)addr, count * 4);
	__ide_flush_epilogue();
}

/* ide_insw calls insw, not __ide_insw. Why? */
#undef insw
#undef insl
#undef outsw
#undef outsl
#define insw(port, addr, count) __ide_insw(port, addr, count)
#define insl(port, addr, count) __ide_insl(port, addr, count)
#define outsw(port, addr, count) __ide_outsw(port, addr, count)
#define outsl(port, addr, count) __ide_outsl(port, addr, count)

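/*
 * A minimal usage sketch: with the redefinitions above in effect, an
 * ordinary PIO data-register read in a legacy driver resolves to
 * __ide_insw() and therefore picks up the D-cache maintenance on
 * aliasing CPUs.  The port value and buffer below are hypothetical and
 * purely illustrative:
 *
 *	u16 buf[256];				(one 512-byte sector)
 *
 *	insw(io_base + data_reg_offset, buf, 256);
 */
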
#endif /* __KERNEL__ */

#endif /* __ASM_MACH_GENERIC_IDE_H */