/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 SiFive
 * Copyright (C) 2018 Andes Technology Corporation
 */

#ifndef _ASM_RISCV_PERF_EVENT_H
#define _ASM_RISCV_PERF_EVENT_H

#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>

#ifdef CONFIG_RISCV_BASE_PMU
#define RISCV_BASE_COUNTERS	2

/*
 * The RISCV_MAX_COUNTERS parameter should be specified.
 */
#define RISCV_MAX_COUNTERS	2

/*
 * These are the indexes of bits in the counteren register *minus* 1,
 * except for cycle. They would map directly onto the counteren bit
 * definitions, but there is a *time* register at counteren[1], and
 * per-cpu storage is a scarce resource here.
 *
 * According to the spec, an implementation may support counters up to
 * mhpmcounter31, but many high-end processors have at most 6 general
 * PMCs, so we only define up to MHPMCOUNTER8 here.
 */
#define RISCV_PMU_CYCLE		0
#define RISCV_PMU_INSTRET	1
#define RISCV_PMU_MHPMCOUNTER3	2
#define RISCV_PMU_MHPMCOUNTER4	3
#define RISCV_PMU_MHPMCOUNTER5	4
#define RISCV_PMU_MHPMCOUNTER6	5
#define RISCV_PMU_MHPMCOUNTER7	6
#define RISCV_PMU_MHPMCOUNTER8	7

#define RISCV_OP_UNSUPP		(-EOPNOTSUPP)
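/*
 * Illustrative sketch, not part of the original header: given the
 * "counteren bit index minus one, except for cycle" layout described
 * above, a hypothetical helper to recover the counteren bit position
 * from one of the RISCV_PMU_* indexes could look like this. Cycle
 * stays at bit 0; every other index shifts up by one to skip the
 * time register at bit 1.
 */
static inline int riscv_pmu_counteren_bit(int idx)
{
	/* RISCV_PMU_CYCLE -> counteren[0]; all others map to idx + 1 */
	return (idx == RISCV_PMU_CYCLE) ? 0 : idx + 1;
}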
struct cpu_hw_events {
	/* number of currently enabled events */
	int			n_events;
	/* currently enabled events */
	struct perf_event	*events[RISCV_MAX_COUNTERS];
	/* vendor-defined PMU data */
	void			*platform;
};

struct riscv_pmu {
	struct pmu	*pmu;

	/* generic hw/cache events table */
	const int	*hw_events;
	const int	(*cache_events)[PERF_COUNT_HW_CACHE_MAX]
				       [PERF_COUNT_HW_CACHE_OP_MAX]
				       [PERF_COUNT_HW_CACHE_RESULT_MAX];
	/* method used to map hw/cache events */
	int		(*map_hw_event)(u64 config);
	int		(*map_cache_event)(u64 config);

	/* max generic hw events in map */
	int		max_events;
	/* total number of counters: 2 (base) + x (general) */
	int		num_counters;
	/* the width of the counter */
	int		counter_width;

	/* vendor-defined PMU features */
	void		*platform;

	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	int		irq;
};

#endif /* CONFIG_RISCV_BASE_PMU */

#ifdef CONFIG_PERF_EVENTS
#define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs
#endif

#endif /* _ASM_RISCV_PERF_EVENT_H */