/* SPDX-License-Identifier: GPL-2.0 */

/* Multi-read of the tracepoint definitions: clear any previously set system. */
#undef TRACE_SYSTEM_VAR

#ifdef CONFIG_BPF_EVENTS

#undef __entry
#define __entry entry

/*
 * Dynamic-array fields store a packed (len << 16 | offset) word in
 * __data_loc_<field>; the low 16 bits locate the payload relative to
 * the start of the entry, the high 16 bits are its length.
 */
#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)	\
		((__entry->__data_loc_##field >> 16) & 0xffff)

#undef __get_str
#define __get_str(field) ((char *)__get_dynamic_array(field))

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

/* perf-only annotations are pass-through for the BPF probe. */
#undef __perf_count
#define __perf_count(c)	(c)

#undef __perf_task
#define __perf_task(t)	(t)

/* cast any integer, pointer, or small struct to u64 */
#define UINTTYPE(size) \
	__typeof__(__builtin_choose_expr(size == 1,  (u8)1, \
		   __builtin_choose_expr(size == 2, (u16)2, \
		   __builtin_choose_expr(size == 4, (u32)3, \
		   __builtin_choose_expr(size == 8, (u64)4, \
					 (void)5)))))
/*
 * Widen x to u64 via an unsigned type of matching size; memcpy (rather
 * than a cast) avoids strict-aliasing issues and handles small structs.
 */
#define __CAST_TO_U64(x) ({ \
	typeof(x) __src = (x); \
	UINTTYPE(sizeof(x)) __dst; \
	memcpy(&__dst, &__src, sizeof(__dst)); \
	(u64)__dst; })

/* Recursive expansion: __CASTn converts the first n arguments. */
#define __CAST1(a,...) __CAST_TO_U64(a)
#define __CAST2(a,...) __CAST_TO_U64(a), __CAST1(__VA_ARGS__)
#define __CAST3(a,...) __CAST_TO_U64(a), __CAST2(__VA_ARGS__)
#define __CAST4(a,...) __CAST_TO_U64(a), __CAST3(__VA_ARGS__)
#define __CAST5(a,...) __CAST_TO_U64(a), __CAST4(__VA_ARGS__)
#define __CAST6(a,...) __CAST_TO_U64(a), __CAST5(__VA_ARGS__)
#define __CAST7(a,...) __CAST_TO_U64(a), __CAST6(__VA_ARGS__)
#define __CAST8(a,...) __CAST_TO_U64(a), __CAST7(__VA_ARGS__)
#define __CAST9(a,...) __CAST_TO_U64(a), __CAST8(__VA_ARGS__)
#define __CAST10(a,...) __CAST_TO_U64(a), __CAST9(__VA_ARGS__)
#define __CAST11(a,...) __CAST_TO_U64(a), __CAST10(__VA_ARGS__)
#define __CAST12(a,...) __CAST_TO_U64(a), __CAST11(__VA_ARGS__)
/* tracepoints with more than 12 arguments will hit build error */
#define CAST_TO_U64(...) CONCATENATE(__CAST, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)

/*
 * Emit the per-class BPF dispatch stub: it receives the attached
 * bpf_prog via __data and hands the tracepoint arguments, each widened
 * to u64, to the matching-arity bpf_trace_runN() helper.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
__bpf_trace_##call(void *__data, proto)					\
{									\
	struct bpf_prog *prog = __data;					\
	CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(prog, CAST_TO_U64(args)); \
}

/*
 * This part is compiled out, it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the
 * bpf probe will fail to compile unless it too is updated.
 */
#define __DEFINE_EVENT(template, call, proto, args, size)		\
static inline void bpf_test_probe_##call(void)				\
{									\
	check_trace_callback_type_##call(__bpf_trace_##template);	\
}									\
typedef void (*btf_trace_##call)(void *__data, proto);			\
static union {								\
	struct bpf_raw_event_map event;					\
	btf_trace_##call handler;					\
} __bpf_trace_tp_map_##call __used					\
__section("__bpf_raw_tp_map") = {					\
	.event = {							\
		.tp		= &__tracepoint_##call,			\
		.bpf_func	= __bpf_trace_##template,		\
		.num_args	= COUNT_ARGS(args),			\
		.writable_size	= size,					\
	},								\
};

#define FIRST(x, ...) x

/*
 * Writable events additionally build-check that the declared writable
 * size matches the size of the first tracepoint argument's pointee.
 */
#undef DEFINE_EVENT_WRITABLE
#define DEFINE_EVENT_WRITABLE(template, call, proto, args, size)	\
static inline void bpf_test_buffer_##call(void)				\
{									\
	/* BUILD_BUG_ON() is ignored if the code is completely eliminated, but \
	 * BUILD_BUG_ON_ZERO() uses a different mechanism that is not	\
	 * dead-code-eliminated.					\
	 */								\
	FIRST(proto);							\
	(void)BUILD_BUG_ON_ZERO(size != sizeof(*FIRST(args)));		\
}									\
__DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
	__DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), 0)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef DEFINE_EVENT_WRITABLE
#undef __DEFINE_EVENT
#undef FIRST

#endif /* CONFIG_BPF_EVENTS */