#ifndef __ASM_SH_SYSTEM_H
#define __ASM_SH_SYSTEM_H

/*
 * Copyright (C) 1999, 2000  Niibe Yutaka  &  Kaz Kojima
 * Copyright (C) 2002 Paul Mundt
 *
 * from linux kernel code.
 */

#include <linux/irqflags.h>
#include <asm/types.h>

/*
 * switch_to() should switch tasks to task nr n, first
 */

#define switch_to(prev, next, last) do {				\
	struct task_struct *__last;					\
	register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
	register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
	register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
	register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next; \
	register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp; \
	register unsigned long __ts7 __asm__ ("r7") = next->thread.pc;	\
	__asm__ __volatile__ (".balign 4\n\t"				\
			      "stc.l	gbr, @-r15\n\t"			\
			      "sts.l	pr, @-r15\n\t"			\
			      "mov.l	r8, @-r15\n\t"			\
			      "mov.l	r9, @-r15\n\t"			\
			      "mov.l	r10, @-r15\n\t"			\
			      "mov.l	r11, @-r15\n\t"			\
			      "mov.l	r12, @-r15\n\t"			\
			      "mov.l	r13, @-r15\n\t"			\
			      "mov.l	r14, @-r15\n\t"			\
			      "mov.l	r15, @r1	! save SP\n\t"	\
			      "mov.l	@r6, r15	! change to new stack\n\t" \
			      "mova	1f, %0\n\t"			\
			      "mov.l	%0, @r2		! save PC\n\t"	\
			      "mov.l	2f, %0\n\t"			\
			      "jmp	@%0		! call __switch_to\n\t" \
			      " lds	r7, pr		! with return to new PC\n\t" \
			      ".balign	4\n"				\
			      "2:\n\t"					\
			      ".long	__switch_to\n"			\
			      "1:\n\t"					\
			      "mov.l	@r15+, r14\n\t"			\
			      "mov.l	@r15+, r13\n\t"			\
			      "mov.l	@r15+, r12\n\t"			\
			      "mov.l	@r15+, r11\n\t"			\
			      "mov.l	@r15+, r10\n\t"			\
			      "mov.l	@r15+, r9\n\t"			\
			      "mov.l	@r15+, r8\n\t"			\
			      "lds.l	@r15+, pr\n\t"			\
			      "ldc.l	@r15+, gbr\n\t"			\
			      : "=z" (__last)				\
			      : "r" (__ts1), "r" (__ts2), "r" (__ts4),	\
				"r" (__ts5), "r" (__ts6), "r" (__ts7)	\
			      : "r3", "t");				\
	last = __last;							\
} while (0)
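
/*
 * Illustrative only, not part of the original header: a scheduler core
 * would invoke the macro roughly as below, with "prev" and "next" being
 * struct task_struct pointers and "last" an lvalue that receives the
 * macro's "=z" output operand:
 *
 *	struct task_struct *last;
 *	switch_to(prev, next, last);
 */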

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#ifdef CONFIG_CPU_SH4A
#define __icbi()					\
{							\
	unsigned long __addr;				\
	__addr = 0xa8000000;				\
	__asm__ __volatile__(				\
		"icbi   %0\n\t"				\
		: /* no output */			\
		: "m" (__m(__addr)));			\
}
#endif

static inline unsigned long tas(volatile int *m)
{
	unsigned long retval;

	__asm__ __volatile__ ("tas.b	@%1\n\t"
			      "movt	%0"
			      : "=r" (retval): "r" (m): "t", "memory");
	return retval;
}
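
/*
 * Illustrative sketch, not part of the original header: tas() returns the
 * T bit after "tas.b", i.e. nonzero when the byte at *m was previously
 * zero (and has now had its MSB set), so a trivial busy-wait lock could
 * be spelled roughly as:
 *
 *	static volatile int lock;
 *
 *	while (!tas(&lock))
 *		;		 spin until we observed 0 and set it
 *	... critical section ...
 *	lock = 0;		 release
 */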

/*
 * A brief note on ctrl_barrier(), the control register write barrier.
 *
 * Legacy SH cores typically require a sequence of 8 nops after
 * modification of a control register in order for the changes to take
 * effect. On newer cores (like the sh4a and sh5) this is accomplished
 * with icbi.
 *
 * Also note that on sh4a in the icbi case we can forego a synco for the
 * write barrier, as it's not necessary for control registers.
 *
 * Historically we have only done this type of barrier for the MMUCR, but
 * it's also necessary for the CCR, so we make it generic here instead.
 */
#ifdef CONFIG_CPU_SH4A
#define mb()		__asm__ __volatile__ ("synco": : :"memory")
#define rmb()		mb()
#define wmb()		__asm__ __volatile__ ("synco": : :"memory")
#define ctrl_barrier()	__icbi()
#define read_barrier_depends()	do { } while(0)
#else
#define mb()		__asm__ __volatile__ ("": : :"memory")
#define rmb()		mb()
#define wmb()		__asm__ __volatile__ ("": : :"memory")
#define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#define read_barrier_depends()	do { } while(0)
#endif

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define set_mb(var, value) do { xchg(&var, value); } while (0)

/*
 * Jump to P2 area.
 * When handling TLB or caches, we need to do it from P2 area.
 */
#define jump_to_P2()			\
do {					\
	unsigned long __dummy;		\
	__asm__ __volatile__(		\
		"mov.l	1f, %0\n\t"	\
		"or	%1, %0\n\t"	\
		"jmp	@%0\n\t"	\
		" nop\n\t"		\
		".balign 4\n"		\
		"1:	.long 2f\n"	\
		"2:"			\
		: "=&r" (__dummy)	\
		: "r" (0x20000000));	\
} while (0)

/*
 * Back to P1 area.
 */
#define back_to_P1()				\
do {						\
	unsigned long __dummy;			\
	ctrl_barrier();				\
	__asm__ __volatile__(			\
		"mov.l	1f, %0\n\t"		\
		"jmp	@%0\n\t"		\
		" nop\n\t"			\
		".balign 4\n"			\
		"1:	.long 2f\n"		\
		"2:"				\
		: "=&r" (__dummy));		\
} while (0)
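
/*
 * Illustrative only, not from the original header: cache/TLB maintenance
 * that pokes control registers is typically bracketed by the two macros
 * above, e.g. (the register name and value are placeholders):
 *
 *	jump_to_P2();
 *	writel(new_ccr, CCR);	 run from the uncached P2 alias
 *	back_to_P1();
 */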

static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);
	return retval;
}

static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val & 0xff;
	local_irq_restore(flags);
	return retval;
}

extern void __xchg_called_with_bad_pointer(void);

#define __xchg(ptr, x, size)				\
({							\
	unsigned long __xchg__res;			\
	volatile void *__xchg_ptr = (ptr);		\
	switch (size) {					\
	case 4:						\
		__xchg__res = xchg_u32(__xchg_ptr, x);	\
		break;					\
	case 1:						\
		__xchg__res = xchg_u8(__xchg_ptr, x);	\
		break;					\
	default:					\
		__xchg_called_with_bad_pointer();	\
		__xchg__res = x;			\
		break;					\
	}						\
							\
	__xchg__res;					\
})

#define xchg(ptr,x)	\
	((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))

static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u32 retval;
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	if (retval == old)
		*m = new;
	local_irq_restore(flags);	/* implies memory barrier  */
	return retval;
}

/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
		unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
({									 \
	__typeof__(*(ptr)) _o_ = (o);					 \
	__typeof__(*(ptr)) _n_ = (n);					 \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	 \
			(unsigned long)_n_, sizeof(*(ptr)));		 \
})
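
/*
 * Illustrative sketch, not part of the original header: cmpxchg() returns
 * the value actually observed at *ptr, so lock-free updates are usually
 * written as a retry loop (atomic_add_example() is a hypothetical helper,
 * not something this header provides):
 *
 *	static inline void atomic_add_example(volatile int *v, int n)
 *	{
 *		int old;
 *
 *		do {
 *			old = *v;
 *		} while (cmpxchg(v, old, old + n) != old);
 *	}
 */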

extern void *set_exception_table_vec(unsigned int vec, void *handler);

static inline void *set_exception_table_evt(unsigned int evt, void *handler)
{
	return set_exception_table_vec(evt >> 5, handler);
}

/* XXX
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

#define arch_align_stack(x) (x)

#endif