/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM percpu

#if !defined(_TRACE_PERCPU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_PERCPU_H

#include <linux/tracepoint.h>

/* Traced on each successful per-CPU allocation. */
TRACE_EVENT(percpu_alloc_percpu,

	TP_PROTO(bool reserved, bool is_atomic, size_t size,
		 size_t align, void *base_addr, int off, void __percpu *ptr),

	TP_ARGS(reserved, is_atomic, size, align, base_addr, off, ptr),

	TP_STRUCT__entry(
		__field(	bool,			reserved	)
		__field(	bool,			is_atomic	)
		__field(	size_t,			size		)
		__field(	size_t,			align		)
		__field(	void *,			base_addr	)
		__field(	int,			off		)
		__field(	void __percpu *,	ptr		)
	),

	TP_fast_assign(
		__entry->reserved	= reserved;
		__entry->is_atomic	= is_atomic;
		__entry->size		= size;
		__entry->align		= align;
		__entry->base_addr	= base_addr;
		__entry->off		= off;
		__entry->ptr		= ptr;
	),

	TP_printk("reserved=%d is_atomic=%d size=%zu align=%zu base_addr=%p off=%d ptr=%p",
		  __entry->reserved, __entry->is_atomic,
		  __entry->size, __entry->align,
		  __entry->base_addr, __entry->off, __entry->ptr)
);

/* Traced when a per-CPU allocation is freed. */
TRACE_EVENT(percpu_free_percpu,

	TP_PROTO(void *base_addr, int off, void __percpu *ptr),

	TP_ARGS(base_addr, off, ptr),

	TP_STRUCT__entry(
		__field(	void *,			base_addr	)
		__field(	int,			off		)
		__field(	void __percpu *,	ptr		)
	),

	TP_fast_assign(
		__entry->base_addr	= base_addr;
		__entry->off		= off;
		__entry->ptr		= ptr;
	),

	TP_printk("base_addr=%p off=%d ptr=%p",
		  __entry->base_addr, __entry->off, __entry->ptr)
);

/* Traced when a per-CPU allocation request fails. */
TRACE_EVENT(percpu_alloc_percpu_fail,

	TP_PROTO(bool reserved, bool is_atomic, size_t size, size_t align),

	TP_ARGS(reserved, is_atomic, size, align),

	TP_STRUCT__entry(
		__field(	bool,	reserved	)
		__field(	bool,	is_atomic	)
		__field(	size_t,	size		)
		__field(	size_t,	align		)
	),

	TP_fast_assign(
		__entry->reserved	= reserved;
		__entry->is_atomic	= is_atomic;
		__entry->size		= size;
		__entry->align		= align;
	),

	TP_printk("reserved=%d is_atomic=%d size=%zu align=%zu",
		  __entry->reserved, __entry->is_atomic,
		  __entry->size, __entry->align)
);

/* Traced when a new percpu chunk is created. */
TRACE_EVENT(percpu_create_chunk,

	TP_PROTO(void *base_addr),

	TP_ARGS(base_addr),

	TP_STRUCT__entry(
		__field(	void *, base_addr	)
	),

	TP_fast_assign(
		__entry->base_addr	= base_addr;
	),

	TP_printk("base_addr=%p", __entry->base_addr)
);

/* Traced when a percpu chunk is destroyed. */
TRACE_EVENT(percpu_destroy_chunk,

	TP_PROTO(void *base_addr),

	TP_ARGS(base_addr),

	TP_STRUCT__entry(
		__field(	void *, base_addr	)
	),

	TP_fast_assign(
		__entry->base_addr	= base_addr;
	),

	TP_printk("base_addr=%p", __entry->base_addr)
);

#endif /* _TRACE_PERCPU_H */

#include <trace/define_trace.h>