/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998, 1999, 2003 by Ralf Baechle
 * Copyright (C) 2014 by Maciej W. Rozycki
 */
#ifndef _ASM_TIMEX_H
#define _ASM_TIMEX_H

#ifdef __KERNEL__

#include <linux/compiler.h>

#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/mipsregs.h>
#include <asm/cpu-type.h>

/*
 * This is the clock rate of the i8253 PIT. A MIPS system may not have
 * a PIT, but the symbol is used all over the kernel including some APIs.
 * So keeping it defined to the number for the PIT is the only sane thing
 * for now.
 */
#define CLOCK_TICK_RATE 1193182

/*
 * Standard way to access the cycle counter.
 * Currently only used on SMP for scheduling.
 *
 * Only the low 32 bits are available as a continuously counting entity.
 * But this only means we'll force a reschedule every 8 seconds or so
 * (2^32 cycles at e.g. 500 MHz is about 8.6 s), which isn't an evil
 * thing.
 *
 * We know that all SMP capable CPUs have cycle counters.
 */

typedef unsigned int cycles_t;

/*
 * On R4000/R4400 an erratum exists such that if the cycle counter is
 * read in the exact moment that it is matching the compare register,
 * no interrupt will be generated.
 *
 * There is a suggested workaround, and the erratum also can't strike if
 * the compare interrupt isn't being used as the clock source device.
 * However for now the implementation of this function doesn't get these
 * fine details right.
 */
static inline int can_use_mips_counter(unsigned int prid)
{
	int comp = (prid & PRID_COMP_MASK) != PRID_COMP_LEGACY;

	if (__builtin_constant_p(cpu_has_counter) && !cpu_has_counter)
		return 0;
	else if (__builtin_constant_p(cpu_has_mips_r) && cpu_has_mips_r)
		return 1;
	else if (likely(!__builtin_constant_p(cpu_has_mips_r) && comp))
		return 1;
	/* Make sure we don't peek at cpu_data[0].options in the fast path! */
	if (!__builtin_constant_p(cpu_has_counter))
		asm volatile("" : "=m" (cpu_data[0].options));
	if (likely(cpu_has_counter &&
		   prid > (PRID_IMP_R4000 | PRID_REV_ENCODE_44(15, 15))))
		return 1;
	else
		return 0;
}
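
/*
 * For reference, a sketch of how the final PRId cut-off above decodes
 * (field encodings per <asm/cpu.h>; treat the concrete numbers as
 * illustrative):
 *
 *	PRID_IMP_R4000 | PRID_REV_ENCODE_44(15, 15)
 *
 * PRID_REV_ENCODE_44() packs a 4-bit major and a 4-bit minor revision,
 * so 15.15 is the highest revision an R4000/R4400-class part (the
 * R4400 shares the R4000 implementation number) can report.  The
 * strict "prid > ..." comparison therefore accepts only implementations
 * numerically above the R4000 family, i.e. parts that cannot be hit by
 * the erratum described above.
 */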

static inline cycles_t get_cycles(void)
{
	if (can_use_mips_counter(read_c0_prid()))
		return read_c0_count();
	else
		return 0;	/* no usable counter */
}
#define get_cycles get_cycles

/*
 * Like get_cycles - but where c0_count is not available we desperately
 * use c0_random in an attempt to get at least a little bit of entropy.
 */
static inline unsigned long random_get_entropy(void)
{
	unsigned int c0_random;

	if (can_use_mips_counter(read_c0_prid()))
		return read_c0_count();

	if (cpu_has_3kex)
		c0_random = (read_c0_random() >> 8) & 0x3f;
	else
		c0_random = read_c0_random() & 0x3f;
	return (random_get_entropy_fallback() << 6) | (0x3f - c0_random);
}
#define random_get_entropy random_get_entropy

#endif /* __KERNEL__ */

#endif /* _ASM_TIMEX_H */
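
/*
 * Usage sketch (illustrative only, not part of this header): treat
 * get_cycles() as a free-running 32-bit timestamp and only ever look
 * at unsigned differences, which stay correct across counter wrap:
 *
 *	cycles_t t0 = get_cycles();
 *	do_work();
 *	cycles_t delta = get_cycles() - t0;
 *
 * In the random_get_entropy() fallback above, c0_random comes from the
 * CP0 Random register, a small counter that decrements within the TLB
 * index range; subtracting it from 0x3f yields a loosely increasing
 * 6-bit value, which is packed into the low bits beneath the generic
 * fallback timestamp.
 */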