1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2*4882a593Smuzhiyun /* 3*4882a593Smuzhiyun * 4*4882a593Smuzhiyun * (C) COPYRIGHT 2011-2016, 2018-2021 ARM Limited. All rights reserved. 5*4882a593Smuzhiyun * 6*4882a593Smuzhiyun * This program is free software and is provided to you under the terms of the 7*4882a593Smuzhiyun * GNU General Public License version 2 as published by the Free Software 8*4882a593Smuzhiyun * Foundation, and any use by you of this program is subject to the terms 9*4882a593Smuzhiyun * of such GNU license. 10*4882a593Smuzhiyun * 11*4882a593Smuzhiyun * This program is distributed in the hope that it will be useful, 12*4882a593Smuzhiyun * but WITHOUT ANY WARRANTY; without even the implied warranty of 13*4882a593Smuzhiyun * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14*4882a593Smuzhiyun * GNU General Public License for more details. 15*4882a593Smuzhiyun * 16*4882a593Smuzhiyun * You should have received a copy of the GNU General Public License 17*4882a593Smuzhiyun * along with this program; if not, you can access it online at 18*4882a593Smuzhiyun * http://www.gnu.org/licenses/gpl-2.0.html. 19*4882a593Smuzhiyun * 20*4882a593Smuzhiyun */ 21*4882a593Smuzhiyun 22*4882a593Smuzhiyun #undef TRACE_SYSTEM 23*4882a593Smuzhiyun #define TRACE_SYSTEM mali 24*4882a593Smuzhiyun 25*4882a593Smuzhiyun #if !defined(_TRACE_MALI_H) || defined(TRACE_HEADER_MULTI_READ) 26*4882a593Smuzhiyun #define _TRACE_MALI_H 27*4882a593Smuzhiyun 28*4882a593Smuzhiyun #include <linux/tracepoint.h> 29*4882a593Smuzhiyun 30*4882a593Smuzhiyun #if defined(CONFIG_MALI_BIFROST_GATOR_SUPPORT) 31*4882a593Smuzhiyun #define MALI_JOB_SLOTS_EVENT_CHANGED 32*4882a593Smuzhiyun 33*4882a593Smuzhiyun /* 34*4882a593Smuzhiyun * mali_job_slots_event - Reports change of job slot status. 35*4882a593Smuzhiyun * @gpu_id: Kbase device id 36*4882a593Smuzhiyun * @event_id: ORed together bitfields representing a type of event, 37*4882a593Smuzhiyun * made with the GATOR_MAKE_EVENT() macro. 38*4882a593Smuzhiyun */ 39*4882a593Smuzhiyun TRACE_EVENT(mali_job_slots_event, 40*4882a593Smuzhiyun TP_PROTO(u32 gpu_id, u32 event_id, u32 tgid, u32 pid, 41*4882a593Smuzhiyun u8 job_id), 42*4882a593Smuzhiyun TP_ARGS(gpu_id, event_id, tgid, pid, job_id), 43*4882a593Smuzhiyun TP_STRUCT__entry( 44*4882a593Smuzhiyun __field(u32, gpu_id) 45*4882a593Smuzhiyun __field(u32, event_id) 46*4882a593Smuzhiyun __field(u32, tgid) 47*4882a593Smuzhiyun __field(u32, pid) 48*4882a593Smuzhiyun __field(u8, job_id) 49*4882a593Smuzhiyun ), 50*4882a593Smuzhiyun TP_fast_assign( 51*4882a593Smuzhiyun __entry->gpu_id = gpu_id; 52*4882a593Smuzhiyun __entry->event_id = event_id; 53*4882a593Smuzhiyun __entry->tgid = tgid; 54*4882a593Smuzhiyun __entry->pid = pid; 55*4882a593Smuzhiyun __entry->job_id = job_id; 56*4882a593Smuzhiyun ), 57*4882a593Smuzhiyun TP_printk("gpu=%u event=%u tgid=%u pid=%u job_id=%u", 58*4882a593Smuzhiyun __entry->gpu_id, __entry->event_id, 59*4882a593Smuzhiyun __entry->tgid, __entry->pid, __entry->job_id) 60*4882a593Smuzhiyun ); 61*4882a593Smuzhiyun 62*4882a593Smuzhiyun /** 63*4882a593Smuzhiyun * mali_pm_status - Reports change of power management status. 64*4882a593Smuzhiyun * @gpu_id: Kbase device id 65*4882a593Smuzhiyun * @event_id: Core type (shader, tiler, L2 cache) 66*4882a593Smuzhiyun * @value: 64bits bitmask reporting either power status of 67*4882a593Smuzhiyun * the cores (1-ON, 0-OFF) 68*4882a593Smuzhiyun */ 69*4882a593Smuzhiyun TRACE_EVENT(mali_pm_status, 70*4882a593Smuzhiyun TP_PROTO(u32 gpu_id, u32 event_id, u64 value), 71*4882a593Smuzhiyun TP_ARGS(gpu_id, event_id, value), 72*4882a593Smuzhiyun TP_STRUCT__entry( 73*4882a593Smuzhiyun __field(u32, gpu_id) 74*4882a593Smuzhiyun __field(u32, event_id) 75*4882a593Smuzhiyun __field(u64, value) 76*4882a593Smuzhiyun ), 77*4882a593Smuzhiyun TP_fast_assign( 78*4882a593Smuzhiyun __entry->gpu_id = gpu_id; 79*4882a593Smuzhiyun __entry->event_id = event_id; 80*4882a593Smuzhiyun __entry->value = value; 81*4882a593Smuzhiyun ), 82*4882a593Smuzhiyun TP_printk("gpu=%u event %u = %llu", 83*4882a593Smuzhiyun __entry->gpu_id, __entry->event_id, __entry->value) 84*4882a593Smuzhiyun ); 85*4882a593Smuzhiyun 86*4882a593Smuzhiyun /** 87*4882a593Smuzhiyun * mali_page_fault_insert_pages - Reports an MMU page fault 88*4882a593Smuzhiyun * resulting in new pages being mapped. 89*4882a593Smuzhiyun * @gpu_id: Kbase device id 90*4882a593Smuzhiyun * @event_id: MMU address space number 91*4882a593Smuzhiyun * @value: Number of newly allocated pages 92*4882a593Smuzhiyun */ 93*4882a593Smuzhiyun TRACE_EVENT(mali_page_fault_insert_pages, 94*4882a593Smuzhiyun TP_PROTO(u32 gpu_id, s32 event_id, u64 value), 95*4882a593Smuzhiyun TP_ARGS(gpu_id, event_id, value), 96*4882a593Smuzhiyun TP_STRUCT__entry( 97*4882a593Smuzhiyun __field(u32, gpu_id) 98*4882a593Smuzhiyun __field(s32, event_id) 99*4882a593Smuzhiyun __field(u64, value) 100*4882a593Smuzhiyun ), 101*4882a593Smuzhiyun TP_fast_assign( 102*4882a593Smuzhiyun __entry->gpu_id = gpu_id; 103*4882a593Smuzhiyun __entry->event_id = event_id; 104*4882a593Smuzhiyun __entry->value = value; 105*4882a593Smuzhiyun ), 106*4882a593Smuzhiyun TP_printk("gpu=%u event %d = %llu", 107*4882a593Smuzhiyun __entry->gpu_id, __entry->event_id, __entry->value) 108*4882a593Smuzhiyun ); 109*4882a593Smuzhiyun 110*4882a593Smuzhiyun /** 111*4882a593Smuzhiyun * mali_total_alloc_pages_change - Reports that the total number of 112*4882a593Smuzhiyun * allocated pages has changed. 113*4882a593Smuzhiyun * @gpu_id: Kbase device id 114*4882a593Smuzhiyun * @event_id: Total number of pages allocated 115*4882a593Smuzhiyun */ 116*4882a593Smuzhiyun TRACE_EVENT(mali_total_alloc_pages_change, 117*4882a593Smuzhiyun TP_PROTO(u32 gpu_id, s64 event_id), 118*4882a593Smuzhiyun TP_ARGS(gpu_id, event_id), 119*4882a593Smuzhiyun TP_STRUCT__entry( 120*4882a593Smuzhiyun __field(u32, gpu_id) 121*4882a593Smuzhiyun __field(s64, event_id) 122*4882a593Smuzhiyun ), 123*4882a593Smuzhiyun TP_fast_assign( 124*4882a593Smuzhiyun __entry->gpu_id = gpu_id; 125*4882a593Smuzhiyun __entry->event_id = event_id; 126*4882a593Smuzhiyun ), 127*4882a593Smuzhiyun TP_printk("gpu=%u event=%lld", __entry->gpu_id, __entry->event_id) 128*4882a593Smuzhiyun ); 129*4882a593Smuzhiyun #endif /* CONFIG_MALI_BIFROST_GATOR_SUPPORT */ 130*4882a593Smuzhiyun 131*4882a593Smuzhiyun /* 132*4882a593Smuzhiyun * MMU subsystem tracepoints 133*4882a593Smuzhiyun */ 134*4882a593Smuzhiyun 135*4882a593Smuzhiyun /* Fault status and exception code helpers 136*4882a593Smuzhiyun * 137*4882a593Smuzhiyun * Must be macros to allow use by user-side tracepoint tools 138*4882a593Smuzhiyun * 139*4882a593Smuzhiyun * bits 0:1 masked off code, and used for the level 140*4882a593Smuzhiyun * 141*4882a593Smuzhiyun * Tracepoint files get included more than once - protect against multiple 142*4882a593Smuzhiyun * definition 143*4882a593Smuzhiyun */ 144*4882a593Smuzhiyun #ifndef __TRACE_MALI_MMU_HELPERS 145*4882a593Smuzhiyun #define __TRACE_MALI_MMU_HELPERS 146*4882a593Smuzhiyun /* Complex macros should be enclosed in parenthesis. 147*4882a593Smuzhiyun * 148*4882a593Smuzhiyun * We need to have those parentheses removed for our arrays of symbolic look-ups 149*4882a593Smuzhiyun * for __print_symbolic() whilst also being able to use them outside trace code 150*4882a593Smuzhiyun */ 151*4882a593Smuzhiyun #define _ENSURE_PARENTHESIS(args...) args 152*4882a593Smuzhiyun 153*4882a593Smuzhiyun #define KBASE_MMU_FAULT_CODE_EXCEPTION_NAME_PRINT(code) \ 154*4882a593Smuzhiyun (!KBASE_MMU_FAULT_CODE_VALID(code) ? "UNKNOWN,level=" : \ 155*4882a593Smuzhiyun __print_symbolic(((code) & ~3u), \ 156*4882a593Smuzhiyun KBASE_MMU_FAULT_CODE_SYMBOLIC_STRINGS)) 157*4882a593Smuzhiyun #define KBASE_MMU_FAULT_CODE_LEVEL(code) \ 158*4882a593Smuzhiyun (((((code) & ~0x3u) == 0xC4) ? 4 : 0) + ((code) & 0x3u)) 159*4882a593Smuzhiyun 160*4882a593Smuzhiyun #define KBASE_MMU_FAULT_STATUS_CODE(status) \ 161*4882a593Smuzhiyun ((status) & 0xFFu) 162*4882a593Smuzhiyun #define KBASE_MMU_FAULT_STATUS_DECODED_STRING(status) \ 163*4882a593Smuzhiyun (((status) & (1u << 10)) ? "DECODER_FAULT" : "SLAVE_FAULT") 164*4882a593Smuzhiyun 165*4882a593Smuzhiyun #define KBASE_MMU_FAULT_STATUS_EXCEPTION_NAME_PRINT(status) \ 166*4882a593Smuzhiyun KBASE_MMU_FAULT_CODE_EXCEPTION_NAME_PRINT( \ 167*4882a593Smuzhiyun KBASE_MMU_FAULT_STATUS_CODE(status)) 168*4882a593Smuzhiyun 169*4882a593Smuzhiyun #define KBASE_MMU_FAULT_STATUS_LEVEL(status) \ 170*4882a593Smuzhiyun KBASE_MMU_FAULT_CODE_LEVEL(KBASE_MMU_FAULT_STATUS_CODE(status)) 171*4882a593Smuzhiyun 172*4882a593Smuzhiyun #define KBASE_MMU_FAULT_STATUS_ACCESS(status) \ 173*4882a593Smuzhiyun ((status) & AS_FAULTSTATUS_ACCESS_TYPE_MASK) 174*4882a593Smuzhiyun #define KBASE_MMU_FAULT_ACCESS_SYMBOLIC_STRINGS _ENSURE_PARENTHESIS(\ 175*4882a593Smuzhiyun {AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC, "ATOMIC" }, \ 176*4882a593Smuzhiyun {AS_FAULTSTATUS_ACCESS_TYPE_EX, "EXECUTE"}, \ 177*4882a593Smuzhiyun {AS_FAULTSTATUS_ACCESS_TYPE_READ, "READ" }, \ 178*4882a593Smuzhiyun {AS_FAULTSTATUS_ACCESS_TYPE_WRITE, "WRITE" }) 179*4882a593Smuzhiyun #define KBASE_MMU_FAULT_STATUS_ACCESS_PRINT(status) \ 180*4882a593Smuzhiyun __print_symbolic(KBASE_MMU_FAULT_STATUS_ACCESS(status), \ 181*4882a593Smuzhiyun KBASE_MMU_FAULT_ACCESS_SYMBOLIC_STRINGS) 182*4882a593Smuzhiyun 183*4882a593Smuzhiyun #if MALI_USE_CSF 184*4882a593Smuzhiyun #define KBASE_MMU_FAULT_CODE_VALID(code) \ 185*4882a593Smuzhiyun ((code >= 0xC0 && code <= 0xEB) && \ 186*4882a593Smuzhiyun (!(code >= 0xC5 && code <= 0xC7)) && \ 187*4882a593Smuzhiyun (!(code >= 0xCC && code <= 0xD8)) && \ 188*4882a593Smuzhiyun (!(code >= 0xDC && code <= 0xDF)) && \ 189*4882a593Smuzhiyun (!(code >= 0xE1 && code <= 0xE3))) 190*4882a593Smuzhiyun #define KBASE_MMU_FAULT_CODE_SYMBOLIC_STRINGS _ENSURE_PARENTHESIS(\ 191*4882a593Smuzhiyun {0xC0, "TRANSLATION_FAULT_" }, \ 192*4882a593Smuzhiyun {0xC4, "TRANSLATION_FAULT_" }, \ 193*4882a593Smuzhiyun {0xC8, "PERMISSION_FAULT_" }, \ 194*4882a593Smuzhiyun {0xD0, "TRANSTAB_BUS_FAULT_" }, \ 195*4882a593Smuzhiyun {0xD8, "ACCESS_FLAG_" }, \ 196*4882a593Smuzhiyun {0xE0, "ADDRESS_SIZE_FAULT_IN" }, \ 197*4882a593Smuzhiyun {0xE4, "ADDRESS_SIZE_FAULT_OUT" }, \ 198*4882a593Smuzhiyun {0xE8, "MEMORY_ATTRIBUTES_FAULT_" }) 199*4882a593Smuzhiyun #else /* MALI_USE_CSF */ 200*4882a593Smuzhiyun #define KBASE_MMU_FAULT_CODE_VALID(code) \ 201*4882a593Smuzhiyun ((code >= 0xC0 && code <= 0xEF) && \ 202*4882a593Smuzhiyun (!(code >= 0xC5 && code <= 0xC6)) && \ 203*4882a593Smuzhiyun (!(code >= 0xCC && code <= 0xCF)) && \ 204*4882a593Smuzhiyun (!(code >= 0xD4 && code <= 0xD7)) && \ 205*4882a593Smuzhiyun (!(code >= 0xDC && code <= 0xDF))) 206*4882a593Smuzhiyun #define KBASE_MMU_FAULT_CODE_SYMBOLIC_STRINGS _ENSURE_PARENTHESIS(\ 207*4882a593Smuzhiyun {0xC0, "TRANSLATION_FAULT_" }, \ 208*4882a593Smuzhiyun {0xC4, "TRANSLATION_FAULT(_7==_IDENTITY)_" }, \ 209*4882a593Smuzhiyun {0xC8, "PERMISSION_FAULT_" }, \ 210*4882a593Smuzhiyun {0xD0, "TRANSTAB_BUS_FAULT_" }, \ 211*4882a593Smuzhiyun {0xD8, "ACCESS_FLAG_" }, \ 212*4882a593Smuzhiyun {0xE0, "ADDRESS_SIZE_FAULT_IN" }, \ 213*4882a593Smuzhiyun {0xE4, "ADDRESS_SIZE_FAULT_OUT" }, \ 214*4882a593Smuzhiyun {0xE8, "MEMORY_ATTRIBUTES_FAULT_" }, \ 215*4882a593Smuzhiyun {0xEC, "MEMORY_ATTRIBUTES_NONCACHEABLE_" }) 216*4882a593Smuzhiyun #endif /* MALI_USE_CSF */ 217*4882a593Smuzhiyun #endif /* __TRACE_MALI_MMU_HELPERS */ 218*4882a593Smuzhiyun 219*4882a593Smuzhiyun /* trace_mali_mmu_page_fault_grow 220*4882a593Smuzhiyun * 221*4882a593Smuzhiyun * Tracepoint about a successful grow of a region due to a GPU page fault 222*4882a593Smuzhiyun */ 223*4882a593Smuzhiyun TRACE_EVENT(mali_mmu_page_fault_grow, 224*4882a593Smuzhiyun TP_PROTO(struct kbase_va_region *reg, struct kbase_fault *fault, 225*4882a593Smuzhiyun size_t new_pages), 226*4882a593Smuzhiyun TP_ARGS(reg, fault, new_pages), 227*4882a593Smuzhiyun TP_STRUCT__entry( 228*4882a593Smuzhiyun __field(u64, start_addr) 229*4882a593Smuzhiyun __field(u64, fault_addr) 230*4882a593Smuzhiyun __field(u64, fault_extra_addr) 231*4882a593Smuzhiyun __field(size_t, new_pages) 232*4882a593Smuzhiyun __field(u32, status) 233*4882a593Smuzhiyun ), 234*4882a593Smuzhiyun TP_fast_assign( 235*4882a593Smuzhiyun __entry->start_addr = ((u64)reg->start_pfn) << PAGE_SHIFT; 236*4882a593Smuzhiyun __entry->fault_addr = fault->addr; 237*4882a593Smuzhiyun __entry->fault_extra_addr = fault->extra_addr; 238*4882a593Smuzhiyun __entry->new_pages = new_pages; 239*4882a593Smuzhiyun __entry->status = fault->status; 240*4882a593Smuzhiyun ), 241*4882a593Smuzhiyun TP_printk("start=0x%llx fault_addr=0x%llx fault_extra_addr=0x%llx new_pages=%zu raw_fault_status=0x%x decoded_faultstatus=%s exception_type=0x%x,%s%u access_type=0x%x,%s source_id=0x%x", 242*4882a593Smuzhiyun __entry->start_addr, __entry->fault_addr, 243*4882a593Smuzhiyun __entry->fault_extra_addr, __entry->new_pages, 244*4882a593Smuzhiyun __entry->status, 245*4882a593Smuzhiyun KBASE_MMU_FAULT_STATUS_DECODED_STRING(__entry->status), 246*4882a593Smuzhiyun KBASE_MMU_FAULT_STATUS_CODE(__entry->status), 247*4882a593Smuzhiyun KBASE_MMU_FAULT_STATUS_EXCEPTION_NAME_PRINT(__entry->status), 248*4882a593Smuzhiyun KBASE_MMU_FAULT_STATUS_LEVEL(__entry->status), 249*4882a593Smuzhiyun KBASE_MMU_FAULT_STATUS_ACCESS(__entry->status) >> 8, 250*4882a593Smuzhiyun KBASE_MMU_FAULT_STATUS_ACCESS_PRINT(__entry->status), 251*4882a593Smuzhiyun __entry->status >> 16) 252*4882a593Smuzhiyun ); 253*4882a593Smuzhiyun 254*4882a593Smuzhiyun 255*4882a593Smuzhiyun 256*4882a593Smuzhiyun 257*4882a593Smuzhiyun /* 258*4882a593Smuzhiyun * Just-in-time memory allocation subsystem tracepoints 259*4882a593Smuzhiyun */ 260*4882a593Smuzhiyun 261*4882a593Smuzhiyun /* Just-in-time memory allocation soft-job template. Override the TP_printk 262*4882a593Smuzhiyun * further if need be. jit_id can be 0. 263*4882a593Smuzhiyun */ 264*4882a593Smuzhiyun DECLARE_EVENT_CLASS(mali_jit_softjob_template, 265*4882a593Smuzhiyun TP_PROTO(struct kbase_va_region *reg, u8 jit_id), 266*4882a593Smuzhiyun TP_ARGS(reg, jit_id), 267*4882a593Smuzhiyun TP_STRUCT__entry( 268*4882a593Smuzhiyun __field(u64, start_addr) 269*4882a593Smuzhiyun __field(size_t, nr_pages) 270*4882a593Smuzhiyun __field(size_t, backed_pages) 271*4882a593Smuzhiyun __field(u8, jit_id) 272*4882a593Smuzhiyun ), 273*4882a593Smuzhiyun TP_fast_assign( 274*4882a593Smuzhiyun __entry->start_addr = ((u64)reg->start_pfn) << PAGE_SHIFT; 275*4882a593Smuzhiyun __entry->nr_pages = reg->nr_pages; 276*4882a593Smuzhiyun __entry->backed_pages = kbase_reg_current_backed_size(reg); 277*4882a593Smuzhiyun __entry->jit_id = jit_id; 278*4882a593Smuzhiyun ), 279*4882a593Smuzhiyun TP_printk("jit_id=%u start=0x%llx va_pages=0x%zx backed_size=0x%zx", 280*4882a593Smuzhiyun __entry->jit_id, __entry->start_addr, __entry->nr_pages, 281*4882a593Smuzhiyun __entry->backed_pages) 282*4882a593Smuzhiyun ); 283*4882a593Smuzhiyun 284*4882a593Smuzhiyun /* trace_mali_jit_alloc() 285*4882a593Smuzhiyun * 286*4882a593Smuzhiyun * Tracepoint about a just-in-time memory allocation soft-job successfully 287*4882a593Smuzhiyun * allocating memory 288*4882a593Smuzhiyun */ 289*4882a593Smuzhiyun DEFINE_EVENT(mali_jit_softjob_template, mali_jit_alloc, 290*4882a593Smuzhiyun TP_PROTO(struct kbase_va_region *reg, u8 jit_id), 291*4882a593Smuzhiyun TP_ARGS(reg, jit_id)); 292*4882a593Smuzhiyun 293*4882a593Smuzhiyun /* trace_mali_jit_free() 294*4882a593Smuzhiyun * 295*4882a593Smuzhiyun * Tracepoint about memory that was allocated just-in-time being freed 296*4882a593Smuzhiyun * (which may happen either on free soft-job, or during rollback error 297*4882a593Smuzhiyun * paths of an allocation soft-job, etc) 298*4882a593Smuzhiyun * 299*4882a593Smuzhiyun * Free doesn't immediately have the just-in-time memory allocation ID so 300*4882a593Smuzhiyun * it's currently suppressed from the output - set jit_id to 0 301*4882a593Smuzhiyun */ 302*4882a593Smuzhiyun DEFINE_EVENT_PRINT(mali_jit_softjob_template, mali_jit_free, 303*4882a593Smuzhiyun TP_PROTO(struct kbase_va_region *reg, u8 jit_id), 304*4882a593Smuzhiyun TP_ARGS(reg, jit_id), 305*4882a593Smuzhiyun TP_printk("start=0x%llx va_pages=0x%zx backed_size=0x%zx", 306*4882a593Smuzhiyun __entry->start_addr, __entry->nr_pages, __entry->backed_pages)); 307*4882a593Smuzhiyun 308*4882a593Smuzhiyun #if !MALI_USE_CSF 309*4882a593Smuzhiyun #if MALI_JIT_PRESSURE_LIMIT_BASE 310*4882a593Smuzhiyun /* trace_mali_jit_report 311*4882a593Smuzhiyun * 312*4882a593Smuzhiyun * Tracepoint about the GPU data structure read to form a just-in-time memory 313*4882a593Smuzhiyun * allocation report, and its calculated physical page usage 314*4882a593Smuzhiyun */ 315*4882a593Smuzhiyun TRACE_EVENT(mali_jit_report, 316*4882a593Smuzhiyun TP_PROTO(struct kbase_jd_atom *katom, struct kbase_va_region *reg, 317*4882a593Smuzhiyun unsigned int id_idx, u64 read_val, u64 used_pages), 318*4882a593Smuzhiyun TP_ARGS(katom, reg, id_idx, read_val, used_pages), 319*4882a593Smuzhiyun TP_STRUCT__entry( 320*4882a593Smuzhiyun __field(u64, start_addr) 321*4882a593Smuzhiyun __field(u64, read_val) 322*4882a593Smuzhiyun __field(u64, used_pages) 323*4882a593Smuzhiyun __field(unsigned long, flags) 324*4882a593Smuzhiyun __field(u8, id_idx) 325*4882a593Smuzhiyun __field(u8, jit_id) 326*4882a593Smuzhiyun ), 327*4882a593Smuzhiyun TP_fast_assign( 328*4882a593Smuzhiyun __entry->start_addr = ((u64)reg->start_pfn) << PAGE_SHIFT; 329*4882a593Smuzhiyun __entry->read_val = read_val; 330*4882a593Smuzhiyun __entry->used_pages = used_pages; 331*4882a593Smuzhiyun __entry->flags = reg->flags; 332*4882a593Smuzhiyun __entry->id_idx = id_idx; 333*4882a593Smuzhiyun __entry->jit_id = katom->jit_ids[id_idx]; 334*4882a593Smuzhiyun ), 335*4882a593Smuzhiyun TP_printk("start=0x%llx jit_ids[%u]=%u read_type='%s' read_val=0x%llx used_pages=%llu", 336*4882a593Smuzhiyun __entry->start_addr, __entry->id_idx, __entry->jit_id, 337*4882a593Smuzhiyun __print_symbolic(__entry->flags, 338*4882a593Smuzhiyun { 0, "address"}, 339*4882a593Smuzhiyun { KBASE_REG_TILER_ALIGN_TOP, "address with align" }, 340*4882a593Smuzhiyun { KBASE_REG_HEAP_INFO_IS_SIZE, "size" }, 341*4882a593Smuzhiyun { KBASE_REG_HEAP_INFO_IS_SIZE | 342*4882a593Smuzhiyun KBASE_REG_TILER_ALIGN_TOP, 343*4882a593Smuzhiyun "size with align (invalid)" } 344*4882a593Smuzhiyun ), 345*4882a593Smuzhiyun __entry->read_val, __entry->used_pages) 346*4882a593Smuzhiyun ); 347*4882a593Smuzhiyun #endif /* MALI_JIT_PRESSURE_LIMIT_BASE */ 348*4882a593Smuzhiyun #endif /* !MALI_USE_CSF */ 349*4882a593Smuzhiyun 350*4882a593Smuzhiyun TRACE_DEFINE_ENUM(KBASE_JIT_REPORT_ON_ALLOC_OR_FREE); 351*4882a593Smuzhiyun #if MALI_JIT_PRESSURE_LIMIT_BASE 352*4882a593Smuzhiyun /* trace_mali_jit_report_pressure 353*4882a593Smuzhiyun * 354*4882a593Smuzhiyun * Tracepoint about change in physical memory pressure, due to the information 355*4882a593Smuzhiyun * about a region changing. Examples include: 356*4882a593Smuzhiyun * - a report on a region that was allocated just-in-time 357*4882a593Smuzhiyun * - just-in-time allocation of a region 358*4882a593Smuzhiyun * - free of a region that was allocated just-in-time 359*4882a593Smuzhiyun */ 360*4882a593Smuzhiyun TRACE_EVENT(mali_jit_report_pressure, 361*4882a593Smuzhiyun TP_PROTO(struct kbase_va_region *reg, u64 new_used_pages, 362*4882a593Smuzhiyun u64 new_pressure, unsigned int flags), 363*4882a593Smuzhiyun TP_ARGS(reg, new_used_pages, new_pressure, flags), 364*4882a593Smuzhiyun TP_STRUCT__entry( 365*4882a593Smuzhiyun __field(u64, start_addr) 366*4882a593Smuzhiyun __field(u64, used_pages) 367*4882a593Smuzhiyun __field(u64, new_used_pages) 368*4882a593Smuzhiyun __field(u64, new_pressure) 369*4882a593Smuzhiyun __field(unsigned int, flags) 370*4882a593Smuzhiyun ), 371*4882a593Smuzhiyun TP_fast_assign( 372*4882a593Smuzhiyun __entry->start_addr = ((u64)reg->start_pfn) << PAGE_SHIFT; 373*4882a593Smuzhiyun __entry->used_pages = reg->used_pages; 374*4882a593Smuzhiyun __entry->new_used_pages = new_used_pages; 375*4882a593Smuzhiyun __entry->new_pressure = new_pressure; 376*4882a593Smuzhiyun __entry->flags = flags; 377*4882a593Smuzhiyun ), 378*4882a593Smuzhiyun TP_printk("start=0x%llx old_used_pages=%llu new_used_pages=%llu new_pressure=%llu report_flags=%s", 379*4882a593Smuzhiyun __entry->start_addr, __entry->used_pages, 380*4882a593Smuzhiyun __entry->new_used_pages, __entry->new_pressure, 381*4882a593Smuzhiyun __print_flags(__entry->flags, "|", 382*4882a593Smuzhiyun { KBASE_JIT_REPORT_ON_ALLOC_OR_FREE, 383*4882a593Smuzhiyun "HAPPENED_ON_ALLOC_OR_FREE" })) 384*4882a593Smuzhiyun ); 385*4882a593Smuzhiyun #endif /* MALI_JIT_PRESSURE_LIMIT_BASE */ 386*4882a593Smuzhiyun 387*4882a593Smuzhiyun #ifndef __TRACE_SYSGRAPH_ENUM 388*4882a593Smuzhiyun #define __TRACE_SYSGRAPH_ENUM 389*4882a593Smuzhiyun /* Enum of sysgraph message IDs */ 390*4882a593Smuzhiyun enum sysgraph_msg { 391*4882a593Smuzhiyun SGR_ARRIVE, 392*4882a593Smuzhiyun SGR_SUBMIT, 393*4882a593Smuzhiyun SGR_COMPLETE, 394*4882a593Smuzhiyun SGR_POST, 395*4882a593Smuzhiyun SGR_ACTIVE, 396*4882a593Smuzhiyun SGR_INACTIVE 397*4882a593Smuzhiyun }; 398*4882a593Smuzhiyun #endif /* __TRACE_SYSGRAPH_ENUM */ 399*4882a593Smuzhiyun 400*4882a593Smuzhiyun /* A template for SYSGRAPH events 401*4882a593Smuzhiyun * 402*4882a593Smuzhiyun * Most of the sysgraph events contain only one input argument 403*4882a593Smuzhiyun * which is atom_id therefore they will be using a common template 404*4882a593Smuzhiyun */ 405*4882a593Smuzhiyun TRACE_EVENT(sysgraph, 406*4882a593Smuzhiyun TP_PROTO(enum sysgraph_msg message, unsigned int proc_id, 407*4882a593Smuzhiyun unsigned int atom_id), 408*4882a593Smuzhiyun TP_ARGS(message, proc_id, atom_id), 409*4882a593Smuzhiyun TP_STRUCT__entry( 410*4882a593Smuzhiyun __field(unsigned int, proc_id) 411*4882a593Smuzhiyun __field(enum sysgraph_msg, message) 412*4882a593Smuzhiyun __field(unsigned int, atom_id) 413*4882a593Smuzhiyun ), 414*4882a593Smuzhiyun TP_fast_assign( 415*4882a593Smuzhiyun __entry->proc_id = proc_id; 416*4882a593Smuzhiyun __entry->message = message; 417*4882a593Smuzhiyun __entry->atom_id = atom_id; 418*4882a593Smuzhiyun ), 419*4882a593Smuzhiyun TP_printk("msg=%u proc_id=%u, param1=%d", __entry->message, 420*4882a593Smuzhiyun __entry->proc_id, __entry->atom_id) 421*4882a593Smuzhiyun ); 422*4882a593Smuzhiyun 423*4882a593Smuzhiyun /* A template for SYSGRAPH GPU events 424*4882a593Smuzhiyun * 425*4882a593Smuzhiyun * Sysgraph events that record start/complete events 426*4882a593Smuzhiyun * on GPU also record a js value in addition to the 427*4882a593Smuzhiyun * atom id. 428*4882a593Smuzhiyun */ 429*4882a593Smuzhiyun TRACE_EVENT(sysgraph_gpu, 430*4882a593Smuzhiyun TP_PROTO(enum sysgraph_msg message, unsigned int proc_id, 431*4882a593Smuzhiyun unsigned int atom_id, unsigned int js), 432*4882a593Smuzhiyun TP_ARGS(message, proc_id, atom_id, js), 433*4882a593Smuzhiyun TP_STRUCT__entry( 434*4882a593Smuzhiyun __field(unsigned int, proc_id) 435*4882a593Smuzhiyun __field(enum sysgraph_msg, message) 436*4882a593Smuzhiyun __field(unsigned int, atom_id) 437*4882a593Smuzhiyun __field(unsigned int, js) 438*4882a593Smuzhiyun ), 439*4882a593Smuzhiyun TP_fast_assign( 440*4882a593Smuzhiyun __entry->proc_id = proc_id; 441*4882a593Smuzhiyun __entry->message = message; 442*4882a593Smuzhiyun __entry->atom_id = atom_id; 443*4882a593Smuzhiyun __entry->js = js; 444*4882a593Smuzhiyun ), 445*4882a593Smuzhiyun TP_printk("msg=%u proc_id=%u, param1=%d, param2=%d", 446*4882a593Smuzhiyun __entry->message, __entry->proc_id, 447*4882a593Smuzhiyun __entry->atom_id, __entry->js) 448*4882a593Smuzhiyun ); 449*4882a593Smuzhiyun 450*4882a593Smuzhiyun /* Tracepoint files get included more than once - protect against multiple 451*4882a593Smuzhiyun * definition 452*4882a593Smuzhiyun */ 453*4882a593Smuzhiyun #undef KBASE_JIT_REPORT_GPU_MEM_SIZE 454*4882a593Smuzhiyun 455*4882a593Smuzhiyun /* Size in bytes of the memory surrounding the location used for a just-in-time 456*4882a593Smuzhiyun * memory allocation report 457*4882a593Smuzhiyun */ 458*4882a593Smuzhiyun #define KBASE_JIT_REPORT_GPU_MEM_SIZE (4 * sizeof(u64)) 459*4882a593Smuzhiyun 460*4882a593Smuzhiyun /* trace_mali_jit_report_gpu_mem 461*4882a593Smuzhiyun * 462*4882a593Smuzhiyun * Tracepoint about the GPU memory nearby the location used for a just-in-time 463*4882a593Smuzhiyun * memory allocation report 464*4882a593Smuzhiyun */ 465*4882a593Smuzhiyun TRACE_EVENT(mali_jit_report_gpu_mem, 466*4882a593Smuzhiyun TP_PROTO(u64 base_addr, u64 reg_addr, u64 *gpu_mem, unsigned int flags), 467*4882a593Smuzhiyun TP_ARGS(base_addr, reg_addr, gpu_mem, flags), 468*4882a593Smuzhiyun TP_STRUCT__entry( 469*4882a593Smuzhiyun __field(u64, base_addr) 470*4882a593Smuzhiyun __field(u64, reg_addr) 471*4882a593Smuzhiyun __array(u64, mem_values, 472*4882a593Smuzhiyun KBASE_JIT_REPORT_GPU_MEM_SIZE / sizeof(u64)) 473*4882a593Smuzhiyun __field(unsigned int, flags) 474*4882a593Smuzhiyun ), 475*4882a593Smuzhiyun TP_fast_assign( 476*4882a593Smuzhiyun __entry->base_addr = base_addr; 477*4882a593Smuzhiyun __entry->reg_addr = reg_addr; 478*4882a593Smuzhiyun memcpy(__entry->mem_values, gpu_mem, 479*4882a593Smuzhiyun sizeof(__entry->mem_values)); 480*4882a593Smuzhiyun __entry->flags = flags; 481*4882a593Smuzhiyun ), 482*4882a593Smuzhiyun TP_printk("start=0x%llx read GPU memory base=0x%llx values=%s report_flags=%s", 483*4882a593Smuzhiyun __entry->reg_addr, __entry->base_addr, 484*4882a593Smuzhiyun __print_array(__entry->mem_values, 485*4882a593Smuzhiyun ARRAY_SIZE(__entry->mem_values), sizeof(u64)), 486*4882a593Smuzhiyun __print_flags(__entry->flags, "|", 487*4882a593Smuzhiyun { KBASE_JIT_REPORT_ON_ALLOC_OR_FREE, 488*4882a593Smuzhiyun "HAPPENED_ON_ALLOC_OR_FREE" })) 489*4882a593Smuzhiyun ); 490*4882a593Smuzhiyun 491*4882a593Smuzhiyun /* trace_mali_jit_trim_from_region 492*4882a593Smuzhiyun * 493*4882a593Smuzhiyun * Tracepoint about trimming physical pages from a region 494*4882a593Smuzhiyun */ 495*4882a593Smuzhiyun TRACE_EVENT(mali_jit_trim_from_region, 496*4882a593Smuzhiyun TP_PROTO(struct kbase_va_region *reg, size_t freed_pages, 497*4882a593Smuzhiyun size_t old_pages, size_t available_pages, size_t new_pages), 498*4882a593Smuzhiyun TP_ARGS(reg, freed_pages, old_pages, available_pages, new_pages), 499*4882a593Smuzhiyun TP_STRUCT__entry( 500*4882a593Smuzhiyun __field(u64, start_addr) 501*4882a593Smuzhiyun __field(size_t, freed_pages) 502*4882a593Smuzhiyun __field(size_t, old_pages) 503*4882a593Smuzhiyun __field(size_t, available_pages) 504*4882a593Smuzhiyun __field(size_t, new_pages) 505*4882a593Smuzhiyun ), 506*4882a593Smuzhiyun TP_fast_assign( 507*4882a593Smuzhiyun __entry->start_addr = ((u64)reg->start_pfn) << PAGE_SHIFT; 508*4882a593Smuzhiyun __entry->freed_pages = freed_pages; 509*4882a593Smuzhiyun __entry->old_pages = old_pages; 510*4882a593Smuzhiyun __entry->available_pages = available_pages; 511*4882a593Smuzhiyun __entry->new_pages = new_pages; 512*4882a593Smuzhiyun ), 513*4882a593Smuzhiyun TP_printk("start=0x%llx freed_pages=%zu old_pages=%zu available_pages=%zu new_pages=%zu", 514*4882a593Smuzhiyun __entry->start_addr, __entry->freed_pages, __entry->old_pages, 515*4882a593Smuzhiyun __entry->available_pages, __entry->new_pages) 516*4882a593Smuzhiyun ); 517*4882a593Smuzhiyun 518*4882a593Smuzhiyun /* trace_mali_jit_trim 519*4882a593Smuzhiyun * 520*4882a593Smuzhiyun * Tracepoint about total trimmed physical pages 521*4882a593Smuzhiyun */ 522*4882a593Smuzhiyun TRACE_EVENT(mali_jit_trim, 523*4882a593Smuzhiyun TP_PROTO(size_t freed_pages), 524*4882a593Smuzhiyun TP_ARGS(freed_pages), 525*4882a593Smuzhiyun TP_STRUCT__entry( 526*4882a593Smuzhiyun __field(size_t, freed_pages) 527*4882a593Smuzhiyun ), 528*4882a593Smuzhiyun TP_fast_assign( 529*4882a593Smuzhiyun __entry->freed_pages = freed_pages; 530*4882a593Smuzhiyun ), 531*4882a593Smuzhiyun TP_printk("freed_pages=%zu", __entry->freed_pages) 532*4882a593Smuzhiyun ); 533*4882a593Smuzhiyun 534*4882a593Smuzhiyun #include "debug/mali_kbase_debug_linux_ktrace.h" 535*4882a593Smuzhiyun 536*4882a593Smuzhiyun #endif /* _TRACE_MALI_H */ 537*4882a593Smuzhiyun 538*4882a593Smuzhiyun #undef TRACE_INCLUDE_PATH 539*4882a593Smuzhiyun /* lwn.net/Articles/383362 suggests this should remain as '.', and instead 540*4882a593Smuzhiyun * extend CFLAGS 541*4882a593Smuzhiyun */ 542*4882a593Smuzhiyun #define TRACE_INCLUDE_PATH . 543*4882a593Smuzhiyun #undef TRACE_INCLUDE_FILE 544*4882a593Smuzhiyun #define TRACE_INCLUDE_FILE mali_linux_trace 545*4882a593Smuzhiyun 546*4882a593Smuzhiyun /* This part must be outside protection */ 547*4882a593Smuzhiyun #include <trace/define_trace.h> 548