// SPDX-License-Identifier: GPL-2.0
#include <linux/percpu.h>
#include <linux/jump_label.h>
#include <asm/trace.h>
#include <asm/asm-prototypes.h>

#ifdef CONFIG_JUMP_LABEL
struct static_key opal_tracepoint_key = STATIC_KEY_INIT;

int opal_tracepoint_regfunc(void)
{
	static_key_slow_inc(&opal_tracepoint_key);
	return 0;
}

void opal_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&opal_tracepoint_key);
}
#else
/*
 * We optimise OPAL calls by placing opal_tracepoint_refcount
 * directly in the TOC so we can check if the opal tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long opal_tracepoint_refcount;

int opal_tracepoint_regfunc(void)
{
	opal_tracepoint_refcount++;
	return 0;
}

void opal_tracepoint_unregfunc(void)
{
	opal_tracepoint_refcount--;
}
#endif

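/*
 * Illustrative sketch only, not part of the original file: these reg/unreg
 * functions are assumed to be wired up as the registration callbacks of the
 * opal_entry/opal_exit tracepoints (e.g. via TRACE_EVENT_FN in asm/trace.h),
 * so the OPAL call path can decide with a single test whether to branch to
 * the traced variant.  Roughly, with hypothetical helper names:
 *
 *	if (unlikely(opal_tracepoint_refcount))
 *		return opal_call_traced(opcode, args);	// hypothetical slow path
 *	return opal_call_raw(opcode, args);		// hypothetical fast path
 *
 * (With CONFIG_JUMP_LABEL the equivalent check is expected to be patched
 * in and out via opal_tracepoint_key rather than loading the refcount.)
 */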

/*
 * Since the tracing code might execute OPAL calls we need to guard against
 * recursion.
 */
static DEFINE_PER_CPU(unsigned int, opal_trace_depth);

void __trace_opal_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	local_irq_save(flags);

	depth = this_cpu_ptr(&opal_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	preempt_disable();
	trace_opal_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_opal_exit(long opcode, unsigned long retval)
{
	unsigned long flags;
	unsigned int *depth;

	local_irq_save(flags);

	depth = this_cpu_ptr(&opal_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_opal_exit(opcode, retval);
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
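
/*
 * Illustrative sketch only, not part of the original file: a hypothetical
 * traced wrapper showing how the two hooks above are meant to bracket a
 * firmware call (the real dispatch lives in the OPAL call wrappers;
 * do_opal_call() below is made up for the example).  When tracing actually
 * fires, __trace_opal_entry() leaves preemption disabled across the call
 * and __trace_opal_exit() re-enables it, so the two must be called as a
 * pair:
 *
 *	static long opal_call_traced(unsigned long opcode, unsigned long *args)
 *	{
 *		long rc;
 *
 *		__trace_opal_entry(opcode, args);
 *		rc = do_opal_call(opcode, args);	// hypothetical raw OPAL call
 *		__trace_opal_exit(opcode, rc);
 *
 *		return rc;
 *	}
 */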