#ifndef __ASM_SH_SYSTEM_H
#define __ASM_SH_SYSTEM_H

/*
 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
 * Copyright (C) 2002 Paul Mundt
 *
 * from linux kernel code.
 */

#include <asm/irqflags.h>
#include <asm/types.h>

/*
 * switch_to() should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */

#define switch_to(prev, next, last) do { \
	struct task_struct *__last; \
	register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
	register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
	register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
	register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next; \
	register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp; \
	register unsigned long __ts7 __asm__ ("r7") = next->thread.pc; \
	__asm__ __volatile__ (".balign 4\n\t" \
			      "stc.l gbr, @-r15\n\t" \
			      "sts.l pr, @-r15\n\t" \
			      "mov.l r8, @-r15\n\t" \
			      "mov.l r9, @-r15\n\t" \
			      "mov.l r10, @-r15\n\t" \
			      "mov.l r11, @-r15\n\t" \
			      "mov.l r12, @-r15\n\t" \
			      "mov.l r13, @-r15\n\t" \
			      "mov.l r14, @-r15\n\t" \
			      "mov.l r15, @r1	! save SP\n\t" \
			      "mov.l @r6, r15	! change to new stack\n\t" \
			      "mova 1f, %0\n\t" \
			      "mov.l %0, @r2	! save PC\n\t" \
			      "mov.l 2f, %0\n\t" \
			      "jmp @%0		! call __switch_to\n\t" \
			      " lds r7, pr	! with return to new PC\n\t" \
			      ".balign 4\n" \
			      "2:\n\t" \
			      ".long __switch_to\n" \
			      "1:\n\t" \
			      "mov.l @r15+, r14\n\t" \
			      "mov.l @r15+, r13\n\t" \
			      "mov.l @r15+, r12\n\t" \
			      "mov.l @r15+, r11\n\t" \
			      "mov.l @r15+, r10\n\t" \
			      "mov.l @r15+, r9\n\t" \
			      "mov.l @r15+, r8\n\t" \
			      "lds.l @r15+, pr\n\t" \
			      "ldc.l @r15+, gbr\n\t" \
			      : "=z" (__last) \
			      : "r" (__ts1), "r" (__ts2), "r" (__ts4), \
				"r" (__ts5), "r" (__ts6), "r" (__ts7) \
			      : "r3", "t"); \
	last = __last; \
} while (0)
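
/*
 * Usage sketch (illustrative only; a real context switch is driven by the
 * scheduler core, and "prev"/"next" below are hypothetical task pointers):
 *
 *	struct task_struct *last;
 *
 *	switch_to(prev, next, last);
 *	// Execution resumes here only once "prev" is scheduled back in;
 *	// "last" then points to the task that ran immediately before us.
 */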

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#ifdef CONFIG_CPU_SH4A
#define __icbi()					\
{							\
	unsigned long __addr;				\
	__addr = 0xa8000000;				\
	__asm__ __volatile__(				\
		"icbi %0\n\t"				\
		: /* no output */			\
		: "m" (__m(__addr)));			\
}
#endif

static inline unsigned long tas(volatile int *m)
{
	unsigned long retval;

	__asm__ __volatile__ ("tas.b @%1\n\t"
			      "movt %0"
			      : "=r" (retval) : "r" (m) : "t", "memory");
	return retval;
}

/*
 * A brief note on ctrl_barrier(), the control register write barrier.
 *
 * Legacy SH cores typically require a sequence of 8 nops after
 * modification of a control register in order for the changes to take
 * effect. On newer cores (like the sh4a and sh5) this is accomplished
 * with icbi.
 *
 * Also note that on sh4a in the icbi case we can forego a synco for the
 * write barrier, as it's not necessary for control registers.
 *
 * Historically we have only done this type of barrier for the MMUCR, but
 * it's also necessary for the CCR, so we make it generic here instead.
 */
#ifdef CONFIG_CPU_SH4A
#define mb()	__asm__ __volatile__ ("synco" : : : "memory")
#define rmb()	mb()
#define wmb()	__asm__ __volatile__ ("synco" : : : "memory")
#define ctrl_barrier()	__icbi()
#define read_barrier_depends()	do { } while (0)
#else
#define mb()	__asm__ __volatile__ ("" : : : "memory")
#define rmb()	mb()
#define wmb()	__asm__ __volatile__ ("" : : : "memory")
#define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#define read_barrier_depends()	do { } while (0)
#endif

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif

#define set_mb(var, value) do { xchg(&var, value); } while (0)
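
/*
 * Barrier usage sketch (illustrative only; "ready" and "payload" are
 * hypothetical shared variables, not part of this header):
 *
 *	// Producer:
 *	payload = 42;
 *	smp_wmb();		// publish the payload before the flag
 *	ready = 1;
 *
 *	// Consumer:
 *	while (!ready)
 *		;
 *	smp_rmb();		// read the payload only after seeing the flag
 *	consume(payload);	// consume() is hypothetical
 */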

/*
 * Jump to P2 area.
 * When handling TLB or caches, we need to do it from the P2 area.
 */
#define jump_to_P2()					\
do {							\
	unsigned long __dummy;				\
	__asm__ __volatile__(				\
		"mov.l 1f, %0\n\t"			\
		"or %1, %0\n\t"				\
		"jmp @%0\n\t"				\
		" nop\n\t"				\
		".balign 4\n"				\
		"1: .long 2f\n"				\
		"2:"					\
		: "=&r" (__dummy)			\
		: "r" (0x20000000));			\
} while (0)

/*
 * Back to P1 area.
 */
#define back_to_P1()					\
do {							\
	unsigned long __dummy;				\
	ctrl_barrier();					\
	__asm__ __volatile__(				\
		"mov.l 1f, %0\n\t"			\
		"jmp @%0\n\t"				\
		" nop\n\t"				\
		".balign 4\n"				\
		"1: .long 2f\n"				\
		"2:"					\
		: "=&r" (__dummy));			\
} while (0)

static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);
	return retval;
}

static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val & 0xff;
	local_irq_restore(flags);
	return retval;
}

extern void __xchg_called_with_bad_pointer(void);

#define __xchg(ptr, x, size)				\
({							\
	unsigned long __xchg__res;			\
	volatile void *__xchg_ptr = (ptr);		\
	switch (size) {					\
	case 4:						\
		__xchg__res = xchg_u32(__xchg_ptr, x);	\
		break;					\
	case 1:						\
		__xchg__res = xchg_u8(__xchg_ptr, x);	\
		break;					\
	default:					\
		__xchg_called_with_bad_pointer();	\
		__xchg__res = x;			\
		break;					\
	}						\
							\
	__xchg__res;					\
})

#define xchg(ptr, x) \
	((__typeof__(*(ptr)))__xchg((ptr), (unsigned long)(x), sizeof(*(ptr))))
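
/*
 * xchg() usage sketch (illustrative only; "example_lock" and the helpers
 * below are hypothetical, not part of this header):
 *
 *	static volatile int example_lock;
 *
 *	static void example_acquire(void)
 *	{
 *		while (xchg(&example_lock, 1))
 *			;	// spin until the previous value was 0
 *	}
 *
 *	static void example_release(void)
 *	{
 *		xchg(&example_lock, 0);
 *	}
 */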

static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
		unsigned long new)
{
	__u32 retval;
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	if (retval == old)
		*m = new;
	local_irq_restore(flags);	/* implies memory barrier */
	return retval;
}

/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
		unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})

extern void *set_exception_table_vec(unsigned int vec, void *handler);

static inline void *set_exception_table_evt(unsigned int evt, void *handler)
{
	return set_exception_table_vec(evt >> 5, handler);
}

/* XXX
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

#define arch_align_stack(x) (x)

static inline void trigger_address_error(void)
{
	set_bl_bit();
	/* An unaligned longword read from 0x80000001 raises the address
	 * error exception. */
	__asm__ __volatile__ (
		"mov.l @%1, %0"
		:
		: "r" (0x10000000), "r" (0x80000001)
	);
}

#endif