/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H

#include <linux/tracepoint.h>
#include <asm/vmx.h>
#include <asm/svm.h>
#include <asm/clocksource.h>
#include <asm/pvclock-abi.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/*
 * Tracepoint for guest mode entry.
 */
TRACE_EVENT(kvm_entry,
	TP_PROTO(struct kvm_vcpu *vcpu),
	TP_ARGS(vcpu),

	TP_STRUCT__entry(
		__field( unsigned int, vcpu_id )
		__field( unsigned long, rip )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu->vcpu_id;
		__entry->rip = kvm_rip_read(vcpu);
	),

	TP_printk("vcpu %u, rip 0x%lx", __entry->vcpu_id, __entry->rip)
);

/*
 * Tracepoint for hypercall.
 */
TRACE_EVENT(kvm_hypercall,
	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
		 unsigned long a2, unsigned long a3),
	TP_ARGS(nr, a0, a1, a2, a3),

	TP_STRUCT__entry(
		__field( unsigned long, nr )
		__field( unsigned long, a0 )
		__field( unsigned long, a1 )
		__field( unsigned long, a2 )
		__field( unsigned long, a3 )
	),

	TP_fast_assign(
		__entry->nr = nr;
		__entry->a0 = a0;
		__entry->a1 = a1;
		__entry->a2 = a2;
		__entry->a3 = a3;
	),

	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
		  __entry->nr, __entry->a0, __entry->a1, __entry->a2,
		  __entry->a3)
);
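/*
 * Usage note (illustrative, not part of the original header): each
 * TRACE_EVENT(foo, ...) here expands to a trace_foo() helper that KVM
 * calls at the matching point, e.g. the hypercall path is expected to
 * emit roughly:
 *
 *	trace_kvm_hypercall(nr, a0, a1, a2, a3);
 *
 * The events can typically be enabled at run time through tracefs:
 *
 *	echo 1 > /sys/kernel/tracing/events/kvm/kvm_hypercall/enable
 */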
/*
 * Tracepoint for Hyper-V hypercall.
 */
TRACE_EVENT(kvm_hv_hypercall,
	TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
		 __u64 ingpa, __u64 outgpa),
	TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),

	TP_STRUCT__entry(
		__field( __u16, rep_cnt )
		__field( __u16, rep_idx )
		__field( __u64, ingpa )
		__field( __u64, outgpa )
		__field( __u16, code )
		__field( bool, fast )
	),

	TP_fast_assign(
		__entry->rep_cnt = rep_cnt;
		__entry->rep_idx = rep_idx;
		__entry->ingpa = ingpa;
		__entry->outgpa = outgpa;
		__entry->code = code;
		__entry->fast = fast;
	),

	TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
		  __entry->code, __entry->fast ? "fast" : "slow",
		  __entry->rep_cnt, __entry->rep_idx, __entry->ingpa,
		  __entry->outgpa)
);

/*
 * Tracepoint for PIO.
 */

#define KVM_PIO_IN   0
#define KVM_PIO_OUT  1

TRACE_EVENT(kvm_pio,
	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
		 unsigned int count, void *data),
	TP_ARGS(rw, port, size, count, data),

	TP_STRUCT__entry(
		__field( unsigned int, rw )
		__field( unsigned int, port )
		__field( unsigned int, size )
		__field( unsigned int, count )
		__field( unsigned int, val )
	),

	TP_fast_assign(
		__entry->rw = rw;
		__entry->port = port;
		__entry->size = size;
		__entry->count = count;
		if (size == 1)
			__entry->val = *(unsigned char *)data;
		else if (size == 2)
			__entry->val = *(unsigned short *)data;
		else
			__entry->val = *(unsigned int *)data;
	),

	TP_printk("pio_%s at 0x%x size %d count %d val 0x%x %s",
		  __entry->rw ? "write" : "read",
		  __entry->port, __entry->size, __entry->count, __entry->val,
		  __entry->count > 1 ? "(...)" : "")
);
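/*
 * Illustrative sketch only (the exact call sites live in the PIO
 * emulation code and may differ): an emulated OUT would be recorded as
 *
 *	trace_kvm_pio(KVM_PIO_OUT, port, size, count, data);
 *
 * For count > 1 only the first element of the data buffer is captured,
 * which is why the printout appends "(...)".
 */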
/*
 * Tracepoint for fast mmio.
 */
TRACE_EVENT(kvm_fast_mmio,
	TP_PROTO(u64 gpa),
	TP_ARGS(gpa),

	TP_STRUCT__entry(
		__field(u64, gpa)
	),

	TP_fast_assign(
		__entry->gpa = gpa;
	),

	TP_printk("fast mmio at gpa 0x%llx", __entry->gpa)
);

/*
 * Tracepoint for cpuid.
 */
TRACE_EVENT(kvm_cpuid,
	TP_PROTO(unsigned int function, unsigned int index, unsigned long rax,
		 unsigned long rbx, unsigned long rcx, unsigned long rdx,
		 bool found, bool used_max_basic),
	TP_ARGS(function, index, rax, rbx, rcx, rdx, found, used_max_basic),

	TP_STRUCT__entry(
		__field( unsigned int, function )
		__field( unsigned int, index )
		__field( unsigned long, rax )
		__field( unsigned long, rbx )
		__field( unsigned long, rcx )
		__field( unsigned long, rdx )
		__field( bool, found )
		__field( bool, used_max_basic )
	),

	TP_fast_assign(
		__entry->function = function;
		__entry->index = index;
		__entry->rax = rax;
		__entry->rbx = rbx;
		__entry->rcx = rcx;
		__entry->rdx = rdx;
		__entry->found = found;
		__entry->used_max_basic = used_max_basic;
	),

	TP_printk("func %x idx %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s%s",
		  __entry->function, __entry->index, __entry->rax,
		  __entry->rbx, __entry->rcx, __entry->rdx,
		  __entry->found ? "found" : "not found",
		  __entry->used_max_basic ? ", used max basic" : "")
);

#define AREG(x) { APIC_##x, "APIC_" #x }

#define kvm_trace_symbol_apic						\
	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI),	\
	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR),	\
	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT),	\
	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR),	\
	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT),	\
	AREG(ECTRL)
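/*
 * Note (illustrative): AREG() pairs each APIC register offset with its
 * name so that __print_symbolic() in kvm_apic below renders mnemonics
 * instead of raw offsets, e.g. an access at offset 0x300 is expected to
 * show up as "APIC_ICR" in the trace output.
 */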
/*
 * Tracepoint for apic access.
 */
TRACE_EVENT(kvm_apic,
	TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
	TP_ARGS(rw, reg, val),

	TP_STRUCT__entry(
		__field( unsigned int, rw )
		__field( unsigned int, reg )
		__field( unsigned int, val )
	),

	TP_fast_assign(
		__entry->rw = rw;
		__entry->reg = reg;
		__entry->val = val;
	),

	TP_printk("apic_%s %s = 0x%x",
		  __entry->rw ? "write" : "read",
		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
		  __entry->val)
);

#define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
#define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)

#define KVM_ISA_VMX   1
#define KVM_ISA_SVM   2

#define kvm_print_exit_reason(exit_reason, isa)				\
	(isa == KVM_ISA_VMX) ?						\
	__print_symbolic(exit_reason & 0xffff, VMX_EXIT_REASONS) :	\
	__print_symbolic(exit_reason, SVM_EXIT_REASONS),		\
	(isa == KVM_ISA_VMX && exit_reason & ~0xffff) ? " " : "",	\
	(isa == KVM_ISA_VMX) ?						\
	__print_flags(exit_reason & ~0xffff, " ", VMX_EXIT_REASON_FLAGS) : ""

#define TRACE_EVENT_KVM_EXIT(name)					     \
TRACE_EVENT(name,							     \
	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa), \
	TP_ARGS(exit_reason, vcpu, isa),				     \
									     \
	TP_STRUCT__entry(						     \
		__field( unsigned int, exit_reason )			     \
		__field( unsigned long, guest_rip )			     \
		__field( u32, isa )					     \
		__field( u64, info1 )					     \
		__field( u64, info2 )					     \
		__field( u32, intr_info )				     \
		__field( u32, error_code )				     \
		__field( unsigned int, vcpu_id )			     \
	),								     \
									     \
	TP_fast_assign(							     \
		__entry->exit_reason = exit_reason;			     \
		__entry->guest_rip = kvm_rip_read(vcpu);		     \
		__entry->isa = isa;					     \
		__entry->vcpu_id = vcpu->vcpu_id;			     \
		kvm_x86_ops.get_exit_info(vcpu, &__entry->info1,	     \
					  &__entry->info2,		     \
					  &__entry->intr_info,		     \
					  &__entry->error_code);	     \
	),								     \
									     \
	TP_printk("vcpu %u reason %s%s%s rip 0x%lx info1 0x%016llx "	     \
		  "info2 0x%016llx intr_info 0x%08x error_code 0x%08x",	     \
		  __entry->vcpu_id,					     \
		  kvm_print_exit_reason(__entry->exit_reason, __entry->isa), \
		  __entry->guest_rip, __entry->info1, __entry->info2,	     \
		  __entry->intr_info, __entry->error_code)		     \
)
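/*
 * TRACE_EVENT_KVM_EXIT() stamps out structurally identical exit events;
 * it is instantiated below for both kvm_exit and kvm_nested_vmexit,
 * e.g.:
 *
 *	TRACE_EVENT_KVM_EXIT(kvm_exit);
 */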
/*
 * Tracepoint for kvm guest exit:
 */
TRACE_EVENT_KVM_EXIT(kvm_exit);

/*
 * Tracepoint for kvm interrupt injection:
 */
TRACE_EVENT(kvm_inj_virq,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_STRUCT__entry(
		__field( unsigned int, irq )
	),

	TP_fast_assign(
		__entry->irq = irq;
	),

	TP_printk("irq %u", __entry->irq)
);

#define EXS(x) { x##_VECTOR, "#" #x }

#define kvm_trace_sym_exc						\
	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
	EXS(MF), EXS(AC), EXS(MC)

/*
 * Tracepoint for kvm exception injection:
 */
TRACE_EVENT(kvm_inj_exception,
	TP_PROTO(unsigned exception, bool has_error, unsigned error_code,
		 bool reinjected),
	TP_ARGS(exception, has_error, error_code, reinjected),

	TP_STRUCT__entry(
		__field( u8, exception )
		__field( u8, has_error )
		__field( u32, error_code )
		__field( bool, reinjected )
	),

	TP_fast_assign(
		__entry->exception = exception;
		__entry->has_error = has_error;
		__entry->error_code = error_code;
		__entry->reinjected = reinjected;
	),

	TP_printk("%s (0x%x)%s",
		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
		  /* FIXME: don't print error_code if not present */
		  __entry->has_error ? __entry->error_code : 0,
		  __entry->reinjected ? " [reinjected]" : "")
);

/*
 * Tracepoint for page fault.
 */
TRACE_EVENT(kvm_page_fault,
	TP_PROTO(unsigned long fault_address, unsigned int error_code),
	TP_ARGS(fault_address, error_code),

	TP_STRUCT__entry(
		__field( unsigned long, fault_address )
		__field( unsigned int, error_code )
	),

	TP_fast_assign(
		__entry->fault_address = fault_address;
		__entry->error_code = error_code;
	),

	TP_printk("address %lx error_code %x",
		  __entry->fault_address, __entry->error_code)
);
/*
 * Tracepoint for guest MSR access.
 */
TRACE_EVENT(kvm_msr,
	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
	TP_ARGS(write, ecx, data, exception),

	TP_STRUCT__entry(
		__field( unsigned, write )
		__field( u32, ecx )
		__field( u64, data )
		__field( u8, exception )
	),

	TP_fast_assign(
		__entry->write = write;
		__entry->ecx = ecx;
		__entry->data = data;
		__entry->exception = exception;
	),

	TP_printk("msr_%s %x = 0x%llx%s",
		  __entry->write ? "write" : "read",
		  __entry->ecx, __entry->data,
		  __entry->exception ? " (#GP)" : "")
);

#define trace_kvm_msr_read(ecx, data)		trace_kvm_msr(0, ecx, data, false)
#define trace_kvm_msr_write(ecx, data)		trace_kvm_msr(1, ecx, data, false)
#define trace_kvm_msr_read_ex(ecx)		trace_kvm_msr(0, ecx, 0, true)
#define trace_kvm_msr_write_ex(ecx, data)	trace_kvm_msr(1, ecx, data, true)
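/*
 * Illustrative use of the wrappers above (actual call sites may
 * differ): a successful RDMSR would be logged as
 * trace_kvm_msr_read(ecx, data), while a WRMSR that raised #GP would be
 * logged as trace_kvm_msr_write_ex(ecx, data).
 */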
"write" : "read", 406*4882a593Smuzhiyun __entry->cr, __entry->val) 407*4882a593Smuzhiyun ); 408*4882a593Smuzhiyun 409*4882a593Smuzhiyun #define trace_kvm_cr_read(cr, val) trace_kvm_cr(0, cr, val) 410*4882a593Smuzhiyun #define trace_kvm_cr_write(cr, val) trace_kvm_cr(1, cr, val) 411*4882a593Smuzhiyun 412*4882a593Smuzhiyun TRACE_EVENT(kvm_pic_set_irq, 413*4882a593Smuzhiyun TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced), 414*4882a593Smuzhiyun TP_ARGS(chip, pin, elcr, imr, coalesced), 415*4882a593Smuzhiyun 416*4882a593Smuzhiyun TP_STRUCT__entry( 417*4882a593Smuzhiyun __field( __u8, chip ) 418*4882a593Smuzhiyun __field( __u8, pin ) 419*4882a593Smuzhiyun __field( __u8, elcr ) 420*4882a593Smuzhiyun __field( __u8, imr ) 421*4882a593Smuzhiyun __field( bool, coalesced ) 422*4882a593Smuzhiyun ), 423*4882a593Smuzhiyun 424*4882a593Smuzhiyun TP_fast_assign( 425*4882a593Smuzhiyun __entry->chip = chip; 426*4882a593Smuzhiyun __entry->pin = pin; 427*4882a593Smuzhiyun __entry->elcr = elcr; 428*4882a593Smuzhiyun __entry->imr = imr; 429*4882a593Smuzhiyun __entry->coalesced = coalesced; 430*4882a593Smuzhiyun ), 431*4882a593Smuzhiyun 432*4882a593Smuzhiyun TP_printk("chip %u pin %u (%s%s)%s", 433*4882a593Smuzhiyun __entry->chip, __entry->pin, 434*4882a593Smuzhiyun (__entry->elcr & (1 << __entry->pin)) ? "level":"edge", 435*4882a593Smuzhiyun (__entry->imr & (1 << __entry->pin)) ? "|masked":"", 436*4882a593Smuzhiyun __entry->coalesced ? " (coalesced)" : "") 437*4882a593Smuzhiyun ); 438*4882a593Smuzhiyun 439*4882a593Smuzhiyun #define kvm_apic_dst_shorthand \ 440*4882a593Smuzhiyun {0x0, "dst"}, \ 441*4882a593Smuzhiyun {0x1, "self"}, \ 442*4882a593Smuzhiyun {0x2, "all"}, \ 443*4882a593Smuzhiyun {0x3, "all-but-self"} 444*4882a593Smuzhiyun 445*4882a593Smuzhiyun TRACE_EVENT(kvm_apic_ipi, 446*4882a593Smuzhiyun TP_PROTO(__u32 icr_low, __u32 dest_id), 447*4882a593Smuzhiyun TP_ARGS(icr_low, dest_id), 448*4882a593Smuzhiyun 449*4882a593Smuzhiyun TP_STRUCT__entry( 450*4882a593Smuzhiyun __field( __u32, icr_low ) 451*4882a593Smuzhiyun __field( __u32, dest_id ) 452*4882a593Smuzhiyun ), 453*4882a593Smuzhiyun 454*4882a593Smuzhiyun TP_fast_assign( 455*4882a593Smuzhiyun __entry->icr_low = icr_low; 456*4882a593Smuzhiyun __entry->dest_id = dest_id; 457*4882a593Smuzhiyun ), 458*4882a593Smuzhiyun 459*4882a593Smuzhiyun TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)", 460*4882a593Smuzhiyun __entry->dest_id, (u8)__entry->icr_low, 461*4882a593Smuzhiyun __print_symbolic((__entry->icr_low >> 8 & 0x7), 462*4882a593Smuzhiyun kvm_deliver_mode), 463*4882a593Smuzhiyun (__entry->icr_low & (1<<11)) ? "logical" : "physical", 464*4882a593Smuzhiyun (__entry->icr_low & (1<<14)) ? "assert" : "de-assert", 465*4882a593Smuzhiyun (__entry->icr_low & (1<<15)) ? 
"level" : "edge", 466*4882a593Smuzhiyun __print_symbolic((__entry->icr_low >> 18 & 0x3), 467*4882a593Smuzhiyun kvm_apic_dst_shorthand)) 468*4882a593Smuzhiyun ); 469*4882a593Smuzhiyun 470*4882a593Smuzhiyun TRACE_EVENT(kvm_apic_accept_irq, 471*4882a593Smuzhiyun TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec), 472*4882a593Smuzhiyun TP_ARGS(apicid, dm, tm, vec), 473*4882a593Smuzhiyun 474*4882a593Smuzhiyun TP_STRUCT__entry( 475*4882a593Smuzhiyun __field( __u32, apicid ) 476*4882a593Smuzhiyun __field( __u16, dm ) 477*4882a593Smuzhiyun __field( __u16, tm ) 478*4882a593Smuzhiyun __field( __u8, vec ) 479*4882a593Smuzhiyun ), 480*4882a593Smuzhiyun 481*4882a593Smuzhiyun TP_fast_assign( 482*4882a593Smuzhiyun __entry->apicid = apicid; 483*4882a593Smuzhiyun __entry->dm = dm; 484*4882a593Smuzhiyun __entry->tm = tm; 485*4882a593Smuzhiyun __entry->vec = vec; 486*4882a593Smuzhiyun ), 487*4882a593Smuzhiyun 488*4882a593Smuzhiyun TP_printk("apicid %x vec %u (%s|%s)", 489*4882a593Smuzhiyun __entry->apicid, __entry->vec, 490*4882a593Smuzhiyun __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode), 491*4882a593Smuzhiyun __entry->tm ? "level" : "edge") 492*4882a593Smuzhiyun ); 493*4882a593Smuzhiyun 494*4882a593Smuzhiyun TRACE_EVENT(kvm_eoi, 495*4882a593Smuzhiyun TP_PROTO(struct kvm_lapic *apic, int vector), 496*4882a593Smuzhiyun TP_ARGS(apic, vector), 497*4882a593Smuzhiyun 498*4882a593Smuzhiyun TP_STRUCT__entry( 499*4882a593Smuzhiyun __field( __u32, apicid ) 500*4882a593Smuzhiyun __field( int, vector ) 501*4882a593Smuzhiyun ), 502*4882a593Smuzhiyun 503*4882a593Smuzhiyun TP_fast_assign( 504*4882a593Smuzhiyun __entry->apicid = apic->vcpu->vcpu_id; 505*4882a593Smuzhiyun __entry->vector = vector; 506*4882a593Smuzhiyun ), 507*4882a593Smuzhiyun 508*4882a593Smuzhiyun TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector) 509*4882a593Smuzhiyun ); 510*4882a593Smuzhiyun 511*4882a593Smuzhiyun TRACE_EVENT(kvm_pv_eoi, 512*4882a593Smuzhiyun TP_PROTO(struct kvm_lapic *apic, int vector), 513*4882a593Smuzhiyun TP_ARGS(apic, vector), 514*4882a593Smuzhiyun 515*4882a593Smuzhiyun TP_STRUCT__entry( 516*4882a593Smuzhiyun __field( __u32, apicid ) 517*4882a593Smuzhiyun __field( int, vector ) 518*4882a593Smuzhiyun ), 519*4882a593Smuzhiyun 520*4882a593Smuzhiyun TP_fast_assign( 521*4882a593Smuzhiyun __entry->apicid = apic->vcpu->vcpu_id; 522*4882a593Smuzhiyun __entry->vector = vector; 523*4882a593Smuzhiyun ), 524*4882a593Smuzhiyun 525*4882a593Smuzhiyun TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector) 526*4882a593Smuzhiyun ); 527*4882a593Smuzhiyun 528*4882a593Smuzhiyun /* 529*4882a593Smuzhiyun * Tracepoint for nested VMRUN 530*4882a593Smuzhiyun */ 531*4882a593Smuzhiyun TRACE_EVENT(kvm_nested_vmrun, 532*4882a593Smuzhiyun TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl, 533*4882a593Smuzhiyun __u32 event_inj, bool npt), 534*4882a593Smuzhiyun TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt), 535*4882a593Smuzhiyun 536*4882a593Smuzhiyun TP_STRUCT__entry( 537*4882a593Smuzhiyun __field( __u64, rip ) 538*4882a593Smuzhiyun __field( __u64, vmcb ) 539*4882a593Smuzhiyun __field( __u64, nested_rip ) 540*4882a593Smuzhiyun __field( __u32, int_ctl ) 541*4882a593Smuzhiyun __field( __u32, event_inj ) 542*4882a593Smuzhiyun __field( bool, npt ) 543*4882a593Smuzhiyun ), 544*4882a593Smuzhiyun 545*4882a593Smuzhiyun TP_fast_assign( 546*4882a593Smuzhiyun __entry->rip = rip; 547*4882a593Smuzhiyun __entry->vmcb = vmcb; 548*4882a593Smuzhiyun __entry->nested_rip = nested_rip; 549*4882a593Smuzhiyun 
/*
 * Tracepoint for nested VMRUN
 */
TRACE_EVENT(kvm_nested_vmrun,
	TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
		 __u32 event_inj, bool npt),
	TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),

	TP_STRUCT__entry(
		__field( __u64, rip )
		__field( __u64, vmcb )
		__field( __u64, nested_rip )
		__field( __u32, int_ctl )
		__field( __u32, event_inj )
		__field( bool, npt )
	),

	TP_fast_assign(
		__entry->rip = rip;
		__entry->vmcb = vmcb;
		__entry->nested_rip = nested_rip;
		__entry->int_ctl = int_ctl;
		__entry->event_inj = event_inj;
		__entry->npt = npt;
	),

	TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
		  "event_inj: 0x%08x npt: %s",
		  __entry->rip, __entry->vmcb, __entry->nested_rip,
		  __entry->int_ctl, __entry->event_inj,
		  __entry->npt ? "on" : "off")
);

TRACE_EVENT(kvm_nested_intercepts,
	TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions,
		 __u32 intercept1, __u32 intercept2, __u32 intercept3),
	TP_ARGS(cr_read, cr_write, exceptions, intercept1,
		intercept2, intercept3),

	TP_STRUCT__entry(
		__field( __u16, cr_read )
		__field( __u16, cr_write )
		__field( __u32, exceptions )
		__field( __u32, intercept1 )
		__field( __u32, intercept2 )
		__field( __u32, intercept3 )
	),

	TP_fast_assign(
		__entry->cr_read = cr_read;
		__entry->cr_write = cr_write;
		__entry->exceptions = exceptions;
		__entry->intercept1 = intercept1;
		__entry->intercept2 = intercept2;
		__entry->intercept3 = intercept3;
	),

	TP_printk("cr_read: %04x cr_write: %04x excp: %08x "
		  "intercepts: %08x %08x %08x",
		  __entry->cr_read, __entry->cr_write, __entry->exceptions,
		  __entry->intercept1, __entry->intercept2, __entry->intercept3)
);

/*
 * Tracepoint for #VMEXIT while nested
 */
TRACE_EVENT_KVM_EXIT(kvm_nested_vmexit);
/*
 * Tracepoint for #VMEXIT reinjected to the guest
 */
TRACE_EVENT(kvm_nested_vmexit_inject,
	TP_PROTO(__u32 exit_code,
		 __u64 exit_info1, __u64 exit_info2,
		 __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
	TP_ARGS(exit_code, exit_info1, exit_info2,
		exit_int_info, exit_int_info_err, isa),

	TP_STRUCT__entry(
		__field( __u32, exit_code )
		__field( __u64, exit_info1 )
		__field( __u64, exit_info2 )
		__field( __u32, exit_int_info )
		__field( __u32, exit_int_info_err )
		__field( __u32, isa )
	),

	TP_fast_assign(
		__entry->exit_code = exit_code;
		__entry->exit_info1 = exit_info1;
		__entry->exit_info2 = exit_info2;
		__entry->exit_int_info = exit_int_info;
		__entry->exit_int_info_err = exit_int_info_err;
		__entry->isa = isa;
	),

	TP_printk("reason: %s%s%s ext_inf1: 0x%016llx "
		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
		  kvm_print_exit_reason(__entry->exit_code, __entry->isa),
		  __entry->exit_info1, __entry->exit_info2,
		  __entry->exit_int_info, __entry->exit_int_info_err)
);

/*
 * Tracepoint for nested #vmexit because of interrupt pending
 */
TRACE_EVENT(kvm_nested_intr_vmexit,
	TP_PROTO(__u64 rip),
	TP_ARGS(rip),

	TP_STRUCT__entry(
		__field( __u64, rip )
	),

	TP_fast_assign(
		__entry->rip = rip
	),

	TP_printk("rip: 0x%016llx", __entry->rip)
);

/*
 * Tracepoint for the INVLPGA instruction.
 */
TRACE_EVENT(kvm_invlpga,
	TP_PROTO(__u64 rip, int asid, u64 address),
	TP_ARGS(rip, asid, address),

	TP_STRUCT__entry(
		__field( __u64, rip )
		__field( int, asid )
		__field( __u64, address )
	),

	TP_fast_assign(
		__entry->rip = rip;
		__entry->asid = asid;
		__entry->address = address;
	),

	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
		  __entry->rip, __entry->asid, __entry->address)
);

/*
 * Tracepoint for the SKINIT instruction.
 */
TRACE_EVENT(kvm_skinit,
	TP_PROTO(__u64 rip, __u32 slb),
	TP_ARGS(rip, slb),

	TP_STRUCT__entry(
		__field( __u64, rip )
		__field( __u32, slb )
	),

	TP_fast_assign(
		__entry->rip = rip;
		__entry->slb = slb;
	),

	TP_printk("rip: 0x%016llx slb: 0x%08x",
		  __entry->rip, __entry->slb)
);

#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
#define KVM_EMUL_INSN_F_CS_L   (1 << 3)
#define kvm_trace_symbol_emul_flags			\
	{ 0,				"real" },	\
	{ KVM_EMUL_INSN_F_CR0_PE			\
	  | KVM_EMUL_INSN_F_EFL_VM,	"vm16" },	\
	{ KVM_EMUL_INSN_F_CR0_PE,	"prot16" },	\
	{ KVM_EMUL_INSN_F_CR0_PE			\
	  | KVM_EMUL_INSN_F_CS_D,	"prot32" },	\
	{ KVM_EMUL_INSN_F_CR0_PE			\
	  | KVM_EMUL_INSN_F_CS_L,	"prot64" }

#define kei_decode_mode(mode) ({			\
	u8 flags = 0xff;				\
	switch (mode) {					\
	case X86EMUL_MODE_REAL:				\
		flags = 0;				\
		break;					\
	case X86EMUL_MODE_VM86:				\
		flags = KVM_EMUL_INSN_F_EFL_VM;		\
		break;					\
	case X86EMUL_MODE_PROT16:			\
		flags = KVM_EMUL_INSN_F_CR0_PE;		\
		break;					\
	case X86EMUL_MODE_PROT32:			\
		flags = KVM_EMUL_INSN_F_CR0_PE		\
			| KVM_EMUL_INSN_F_CS_D;		\
		break;					\
	case X86EMUL_MODE_PROT64:			\
		flags = KVM_EMUL_INSN_F_CR0_PE		\
			| KVM_EMUL_INSN_F_CS_L;		\
		break;					\
	}						\
	flags;						\
	})
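/*
 * Worked example: kei_decode_mode(X86EMUL_MODE_PROT64) yields
 * KVM_EMUL_INSN_F_CR0_PE | KVM_EMUL_INSN_F_CS_L, which
 * kvm_trace_symbol_emul_flags renders as "prot64" in the
 * kvm_emulate_insn output below.
 */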
" failed" : "" 762*4882a593Smuzhiyun ) 763*4882a593Smuzhiyun ); 764*4882a593Smuzhiyun 765*4882a593Smuzhiyun #define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0) 766*4882a593Smuzhiyun #define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1) 767*4882a593Smuzhiyun 768*4882a593Smuzhiyun TRACE_EVENT( 769*4882a593Smuzhiyun vcpu_match_mmio, 770*4882a593Smuzhiyun TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match), 771*4882a593Smuzhiyun TP_ARGS(gva, gpa, write, gpa_match), 772*4882a593Smuzhiyun 773*4882a593Smuzhiyun TP_STRUCT__entry( 774*4882a593Smuzhiyun __field(gva_t, gva) 775*4882a593Smuzhiyun __field(gpa_t, gpa) 776*4882a593Smuzhiyun __field(bool, write) 777*4882a593Smuzhiyun __field(bool, gpa_match) 778*4882a593Smuzhiyun ), 779*4882a593Smuzhiyun 780*4882a593Smuzhiyun TP_fast_assign( 781*4882a593Smuzhiyun __entry->gva = gva; 782*4882a593Smuzhiyun __entry->gpa = gpa; 783*4882a593Smuzhiyun __entry->write = write; 784*4882a593Smuzhiyun __entry->gpa_match = gpa_match 785*4882a593Smuzhiyun ), 786*4882a593Smuzhiyun 787*4882a593Smuzhiyun TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa, 788*4882a593Smuzhiyun __entry->write ? "Write" : "Read", 789*4882a593Smuzhiyun __entry->gpa_match ? "GPA" : "GVA") 790*4882a593Smuzhiyun ); 791*4882a593Smuzhiyun 792*4882a593Smuzhiyun TRACE_EVENT(kvm_write_tsc_offset, 793*4882a593Smuzhiyun TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset, 794*4882a593Smuzhiyun __u64 next_tsc_offset), 795*4882a593Smuzhiyun TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset), 796*4882a593Smuzhiyun 797*4882a593Smuzhiyun TP_STRUCT__entry( 798*4882a593Smuzhiyun __field( unsigned int, vcpu_id ) 799*4882a593Smuzhiyun __field( __u64, previous_tsc_offset ) 800*4882a593Smuzhiyun __field( __u64, next_tsc_offset ) 801*4882a593Smuzhiyun ), 802*4882a593Smuzhiyun 803*4882a593Smuzhiyun TP_fast_assign( 804*4882a593Smuzhiyun __entry->vcpu_id = vcpu_id; 805*4882a593Smuzhiyun __entry->previous_tsc_offset = previous_tsc_offset; 806*4882a593Smuzhiyun __entry->next_tsc_offset = next_tsc_offset; 807*4882a593Smuzhiyun ), 808*4882a593Smuzhiyun 809*4882a593Smuzhiyun TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id, 810*4882a593Smuzhiyun __entry->previous_tsc_offset, __entry->next_tsc_offset) 811*4882a593Smuzhiyun ); 812*4882a593Smuzhiyun 813*4882a593Smuzhiyun #ifdef CONFIG_X86_64 814*4882a593Smuzhiyun 815*4882a593Smuzhiyun #define host_clocks \ 816*4882a593Smuzhiyun {VDSO_CLOCKMODE_NONE, "none"}, \ 817*4882a593Smuzhiyun {VDSO_CLOCKMODE_TSC, "tsc"} \ 818*4882a593Smuzhiyun 819*4882a593Smuzhiyun TRACE_EVENT(kvm_update_master_clock, 820*4882a593Smuzhiyun TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched), 821*4882a593Smuzhiyun TP_ARGS(use_master_clock, host_clock, offset_matched), 822*4882a593Smuzhiyun 823*4882a593Smuzhiyun TP_STRUCT__entry( 824*4882a593Smuzhiyun __field( bool, use_master_clock ) 825*4882a593Smuzhiyun __field( unsigned int, host_clock ) 826*4882a593Smuzhiyun __field( bool, offset_matched ) 827*4882a593Smuzhiyun ), 828*4882a593Smuzhiyun 829*4882a593Smuzhiyun TP_fast_assign( 830*4882a593Smuzhiyun __entry->use_master_clock = use_master_clock; 831*4882a593Smuzhiyun __entry->host_clock = host_clock; 832*4882a593Smuzhiyun __entry->offset_matched = offset_matched; 833*4882a593Smuzhiyun ), 834*4882a593Smuzhiyun 835*4882a593Smuzhiyun TP_printk("masterclock %d hostclock %s offsetmatched %u", 836*4882a593Smuzhiyun __entry->use_master_clock, 837*4882a593Smuzhiyun __print_symbolic(__entry->host_clock, 
#ifdef CONFIG_X86_64

#define host_clocks					\
	{VDSO_CLOCKMODE_NONE, "none"},			\
	{VDSO_CLOCKMODE_TSC,  "tsc"}

TRACE_EVENT(kvm_update_master_clock,
	TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
	TP_ARGS(use_master_clock, host_clock, offset_matched),

	TP_STRUCT__entry(
		__field( bool, use_master_clock )
		__field( unsigned int, host_clock )
		__field( bool, offset_matched )
	),

	TP_fast_assign(
		__entry->use_master_clock = use_master_clock;
		__entry->host_clock = host_clock;
		__entry->offset_matched = offset_matched;
	),

	TP_printk("masterclock %d hostclock %s offsetmatched %u",
		  __entry->use_master_clock,
		  __print_symbolic(__entry->host_clock, host_clocks),
		  __entry->offset_matched)
);

TRACE_EVENT(kvm_track_tsc,
	TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched,
		 unsigned int online_vcpus, bool use_master_clock,
		 unsigned int host_clock),
	TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock,
		host_clock),

	TP_STRUCT__entry(
		__field( unsigned int, vcpu_id )
		__field( unsigned int, nr_vcpus_matched_tsc )
		__field( unsigned int, online_vcpus )
		__field( bool, use_master_clock )
		__field( unsigned int, host_clock )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->nr_vcpus_matched_tsc = nr_matched;
		__entry->online_vcpus = online_vcpus;
		__entry->use_master_clock = use_master_clock;
		__entry->host_clock = host_clock;
	),

	TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u"
		  " hostclock %s",
		  __entry->vcpu_id, __entry->use_master_clock,
		  __entry->nr_vcpus_matched_tsc, __entry->online_vcpus,
		  __print_symbolic(__entry->host_clock, host_clocks))
);

#endif /* CONFIG_X86_64 */

/*
 * Tracepoint for PML full VMEXIT.
 */
TRACE_EVENT(kvm_pml_full,
	TP_PROTO(unsigned int vcpu_id),
	TP_ARGS(vcpu_id),

	TP_STRUCT__entry(
		__field( unsigned int, vcpu_id )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
	),

	TP_printk("vcpu %d: PML full", __entry->vcpu_id)
);
"growed" : "shrinked") 910*4882a593Smuzhiyun ); 911*4882a593Smuzhiyun 912*4882a593Smuzhiyun TRACE_EVENT(kvm_pvclock_update, 913*4882a593Smuzhiyun TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock), 914*4882a593Smuzhiyun TP_ARGS(vcpu_id, pvclock), 915*4882a593Smuzhiyun 916*4882a593Smuzhiyun TP_STRUCT__entry( 917*4882a593Smuzhiyun __field( unsigned int, vcpu_id ) 918*4882a593Smuzhiyun __field( __u32, version ) 919*4882a593Smuzhiyun __field( __u64, tsc_timestamp ) 920*4882a593Smuzhiyun __field( __u64, system_time ) 921*4882a593Smuzhiyun __field( __u32, tsc_to_system_mul ) 922*4882a593Smuzhiyun __field( __s8, tsc_shift ) 923*4882a593Smuzhiyun __field( __u8, flags ) 924*4882a593Smuzhiyun ), 925*4882a593Smuzhiyun 926*4882a593Smuzhiyun TP_fast_assign( 927*4882a593Smuzhiyun __entry->vcpu_id = vcpu_id; 928*4882a593Smuzhiyun __entry->version = pvclock->version; 929*4882a593Smuzhiyun __entry->tsc_timestamp = pvclock->tsc_timestamp; 930*4882a593Smuzhiyun __entry->system_time = pvclock->system_time; 931*4882a593Smuzhiyun __entry->tsc_to_system_mul = pvclock->tsc_to_system_mul; 932*4882a593Smuzhiyun __entry->tsc_shift = pvclock->tsc_shift; 933*4882a593Smuzhiyun __entry->flags = pvclock->flags; 934*4882a593Smuzhiyun ), 935*4882a593Smuzhiyun 936*4882a593Smuzhiyun TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, " 937*4882a593Smuzhiyun "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, " 938*4882a593Smuzhiyun "flags 0x%x }", 939*4882a593Smuzhiyun __entry->vcpu_id, 940*4882a593Smuzhiyun __entry->version, 941*4882a593Smuzhiyun __entry->tsc_timestamp, 942*4882a593Smuzhiyun __entry->system_time, 943*4882a593Smuzhiyun __entry->tsc_to_system_mul, 944*4882a593Smuzhiyun __entry->tsc_shift, 945*4882a593Smuzhiyun __entry->flags) 946*4882a593Smuzhiyun ); 947*4882a593Smuzhiyun 948*4882a593Smuzhiyun TRACE_EVENT(kvm_wait_lapic_expire, 949*4882a593Smuzhiyun TP_PROTO(unsigned int vcpu_id, s64 delta), 950*4882a593Smuzhiyun TP_ARGS(vcpu_id, delta), 951*4882a593Smuzhiyun 952*4882a593Smuzhiyun TP_STRUCT__entry( 953*4882a593Smuzhiyun __field( unsigned int, vcpu_id ) 954*4882a593Smuzhiyun __field( s64, delta ) 955*4882a593Smuzhiyun ), 956*4882a593Smuzhiyun 957*4882a593Smuzhiyun TP_fast_assign( 958*4882a593Smuzhiyun __entry->vcpu_id = vcpu_id; 959*4882a593Smuzhiyun __entry->delta = delta; 960*4882a593Smuzhiyun ), 961*4882a593Smuzhiyun 962*4882a593Smuzhiyun TP_printk("vcpu %u: delta %lld (%s)", 963*4882a593Smuzhiyun __entry->vcpu_id, 964*4882a593Smuzhiyun __entry->delta, 965*4882a593Smuzhiyun __entry->delta < 0 ? "early" : "late") 966*4882a593Smuzhiyun ); 967*4882a593Smuzhiyun 968*4882a593Smuzhiyun TRACE_EVENT(kvm_enter_smm, 969*4882a593Smuzhiyun TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering), 970*4882a593Smuzhiyun TP_ARGS(vcpu_id, smbase, entering), 971*4882a593Smuzhiyun 972*4882a593Smuzhiyun TP_STRUCT__entry( 973*4882a593Smuzhiyun __field( unsigned int, vcpu_id ) 974*4882a593Smuzhiyun __field( u64, smbase ) 975*4882a593Smuzhiyun __field( bool, entering ) 976*4882a593Smuzhiyun ), 977*4882a593Smuzhiyun 978*4882a593Smuzhiyun TP_fast_assign( 979*4882a593Smuzhiyun __entry->vcpu_id = vcpu_id; 980*4882a593Smuzhiyun __entry->smbase = smbase; 981*4882a593Smuzhiyun __entry->entering = entering; 982*4882a593Smuzhiyun ), 983*4882a593Smuzhiyun 984*4882a593Smuzhiyun TP_printk("vcpu %u: %s SMM, smbase 0x%llx", 985*4882a593Smuzhiyun __entry->vcpu_id, 986*4882a593Smuzhiyun __entry->entering ? 
"entering" : "leaving", 987*4882a593Smuzhiyun __entry->smbase) 988*4882a593Smuzhiyun ); 989*4882a593Smuzhiyun 990*4882a593Smuzhiyun /* 991*4882a593Smuzhiyun * Tracepoint for VT-d posted-interrupts. 992*4882a593Smuzhiyun */ 993*4882a593Smuzhiyun TRACE_EVENT(kvm_pi_irte_update, 994*4882a593Smuzhiyun TP_PROTO(unsigned int host_irq, unsigned int vcpu_id, 995*4882a593Smuzhiyun unsigned int gsi, unsigned int gvec, 996*4882a593Smuzhiyun u64 pi_desc_addr, bool set), 997*4882a593Smuzhiyun TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set), 998*4882a593Smuzhiyun 999*4882a593Smuzhiyun TP_STRUCT__entry( 1000*4882a593Smuzhiyun __field( unsigned int, host_irq ) 1001*4882a593Smuzhiyun __field( unsigned int, vcpu_id ) 1002*4882a593Smuzhiyun __field( unsigned int, gsi ) 1003*4882a593Smuzhiyun __field( unsigned int, gvec ) 1004*4882a593Smuzhiyun __field( u64, pi_desc_addr ) 1005*4882a593Smuzhiyun __field( bool, set ) 1006*4882a593Smuzhiyun ), 1007*4882a593Smuzhiyun 1008*4882a593Smuzhiyun TP_fast_assign( 1009*4882a593Smuzhiyun __entry->host_irq = host_irq; 1010*4882a593Smuzhiyun __entry->vcpu_id = vcpu_id; 1011*4882a593Smuzhiyun __entry->gsi = gsi; 1012*4882a593Smuzhiyun __entry->gvec = gvec; 1013*4882a593Smuzhiyun __entry->pi_desc_addr = pi_desc_addr; 1014*4882a593Smuzhiyun __entry->set = set; 1015*4882a593Smuzhiyun ), 1016*4882a593Smuzhiyun 1017*4882a593Smuzhiyun TP_printk("VT-d PI is %s for irq %u, vcpu %u, gsi: 0x%x, " 1018*4882a593Smuzhiyun "gvec: 0x%x, pi_desc_addr: 0x%llx", 1019*4882a593Smuzhiyun __entry->set ? "enabled and being updated" : "disabled", 1020*4882a593Smuzhiyun __entry->host_irq, 1021*4882a593Smuzhiyun __entry->vcpu_id, 1022*4882a593Smuzhiyun __entry->gsi, 1023*4882a593Smuzhiyun __entry->gvec, 1024*4882a593Smuzhiyun __entry->pi_desc_addr) 1025*4882a593Smuzhiyun ); 1026*4882a593Smuzhiyun 1027*4882a593Smuzhiyun /* 1028*4882a593Smuzhiyun * Tracepoint for kvm_hv_notify_acked_sint. 1029*4882a593Smuzhiyun */ 1030*4882a593Smuzhiyun TRACE_EVENT(kvm_hv_notify_acked_sint, 1031*4882a593Smuzhiyun TP_PROTO(int vcpu_id, u32 sint), 1032*4882a593Smuzhiyun TP_ARGS(vcpu_id, sint), 1033*4882a593Smuzhiyun 1034*4882a593Smuzhiyun TP_STRUCT__entry( 1035*4882a593Smuzhiyun __field(int, vcpu_id) 1036*4882a593Smuzhiyun __field(u32, sint) 1037*4882a593Smuzhiyun ), 1038*4882a593Smuzhiyun 1039*4882a593Smuzhiyun TP_fast_assign( 1040*4882a593Smuzhiyun __entry->vcpu_id = vcpu_id; 1041*4882a593Smuzhiyun __entry->sint = sint; 1042*4882a593Smuzhiyun ), 1043*4882a593Smuzhiyun 1044*4882a593Smuzhiyun TP_printk("vcpu_id %d sint %u", __entry->vcpu_id, __entry->sint) 1045*4882a593Smuzhiyun ); 1046*4882a593Smuzhiyun 1047*4882a593Smuzhiyun /* 1048*4882a593Smuzhiyun * Tracepoint for synic_set_irq. 
/*
 * Tracepoint for synic_set_irq.
 */
TRACE_EVENT(kvm_hv_synic_set_irq,
	TP_PROTO(int vcpu_id, u32 sint, int vector, int ret),
	TP_ARGS(vcpu_id, sint, vector, ret),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, sint)
		__field(int, vector)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->sint = sint;
		__entry->vector = vector;
		__entry->ret = ret;
	),

	TP_printk("vcpu_id %d sint %u vector %d ret %d",
		  __entry->vcpu_id, __entry->sint, __entry->vector,
		  __entry->ret)
);

/*
 * Tracepoint for kvm_hv_synic_send_eoi.
 */
TRACE_EVENT(kvm_hv_synic_send_eoi,
	TP_PROTO(int vcpu_id, int vector),
	TP_ARGS(vcpu_id, vector),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, sint)
		__field(int, vector)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->vector = vector;
	),

	TP_printk("vcpu_id %d vector %d", __entry->vcpu_id, __entry->vector)
);

/*
 * Tracepoint for synic_set_msr.
 */
TRACE_EVENT(kvm_hv_synic_set_msr,
	TP_PROTO(int vcpu_id, u32 msr, u64 data, bool host),
	TP_ARGS(vcpu_id, msr, data, host),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, msr)
		__field(u64, data)
		__field(bool, host)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->msr = msr;
		__entry->data = data;
		__entry->host = host
	),

	TP_printk("vcpu_id %d msr 0x%x data 0x%llx host %d",
		  __entry->vcpu_id, __entry->msr, __entry->data, __entry->host)
);
/*
 * Tracepoint for stimer_set_config.
 */
TRACE_EVENT(kvm_hv_stimer_set_config,
	TP_PROTO(int vcpu_id, int timer_index, u64 config, bool host),
	TP_ARGS(vcpu_id, timer_index, config, host),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, config)
		__field(bool, host)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->config = config;
		__entry->host = host;
	),

	TP_printk("vcpu_id %d timer %d config 0x%llx host %d",
		  __entry->vcpu_id, __entry->timer_index, __entry->config,
		  __entry->host)
);

/*
 * Tracepoint for stimer_set_count.
 */
TRACE_EVENT(kvm_hv_stimer_set_count,
	TP_PROTO(int vcpu_id, int timer_index, u64 count, bool host),
	TP_ARGS(vcpu_id, timer_index, count, host),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, count)
		__field(bool, host)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->count = count;
		__entry->host = host;
	),

	TP_printk("vcpu_id %d timer %d count %llu host %d",
		  __entry->vcpu_id, __entry->timer_index, __entry->count,
		  __entry->host)
);

/*
 * Tracepoint for stimer_start(periodic timer case).
 */
TRACE_EVENT(kvm_hv_stimer_start_periodic,
	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 exp_time),
	TP_ARGS(vcpu_id, timer_index, time_now, exp_time),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, time_now)
		__field(u64, exp_time)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->time_now = time_now;
		__entry->exp_time = exp_time;
	),

	TP_printk("vcpu_id %d timer %d time_now %llu exp_time %llu",
		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
		  __entry->exp_time)
);
/*
 * Tracepoint for stimer_start(one-shot timer case).
 */
TRACE_EVENT(kvm_hv_stimer_start_one_shot,
	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 count),
	TP_ARGS(vcpu_id, timer_index, time_now, count),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, time_now)
		__field(u64, count)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->time_now = time_now;
		__entry->count = count;
	),

	TP_printk("vcpu_id %d timer %d time_now %llu count %llu",
		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
		  __entry->count)
);

/*
 * Tracepoint for stimer_timer_callback.
 */
TRACE_EVENT(kvm_hv_stimer_callback,
	TP_PROTO(int vcpu_id, int timer_index),
	TP_ARGS(vcpu_id, timer_index),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
	),

	TP_printk("vcpu_id %d timer %d",
		  __entry->vcpu_id, __entry->timer_index)
);

/*
 * Tracepoint for stimer_expiration.
 */
TRACE_EVENT(kvm_hv_stimer_expiration,
	TP_PROTO(int vcpu_id, int timer_index, int direct, int msg_send_result),
	TP_ARGS(vcpu_id, timer_index, direct, msg_send_result),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(int, direct)
		__field(int, msg_send_result)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->direct = direct;
		__entry->msg_send_result = msg_send_result;
	),

	TP_printk("vcpu_id %d timer %d direct %d send result %d",
		  __entry->vcpu_id, __entry->timer_index,
		  __entry->direct, __entry->msg_send_result)
);
/*
 * Tracepoint for stimer_cleanup.
 */
TRACE_EVENT(kvm_hv_stimer_cleanup,
	TP_PROTO(int vcpu_id, int timer_index),
	TP_ARGS(vcpu_id, timer_index),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
	),

	TP_printk("vcpu_id %d timer %d",
		  __entry->vcpu_id, __entry->timer_index)
);

TRACE_EVENT(kvm_apicv_update_request,
	TP_PROTO(bool activate, unsigned long bit),
	TP_ARGS(activate, bit),

	TP_STRUCT__entry(
		__field(bool, activate)
		__field(unsigned long, bit)
	),

	TP_fast_assign(
		__entry->activate = activate;
		__entry->bit = bit;
	),

	TP_printk("%s bit=%lu",
		  __entry->activate ? "activate" : "deactivate",
		  __entry->bit)
);

/*
 * Tracepoint for AMD AVIC
 */
TRACE_EVENT(kvm_avic_incomplete_ipi,
	TP_PROTO(u32 vcpu, u32 icrh, u32 icrl, u32 id, u32 index),
	TP_ARGS(vcpu, icrh, icrl, id, index),

	TP_STRUCT__entry(
		__field(u32, vcpu)
		__field(u32, icrh)
		__field(u32, icrl)
		__field(u32, id)
		__field(u32, index)
	),

	TP_fast_assign(
		__entry->vcpu = vcpu;
		__entry->icrh = icrh;
		__entry->icrl = icrl;
		__entry->id = id;
		__entry->index = index;
	),

	TP_printk("vcpu=%u, icrh:icrl=%#010x:%08x, id=%u, index=%u",
		  __entry->vcpu, __entry->icrh, __entry->icrl,
		  __entry->id, __entry->index)
);
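/*
 * Usage sketch (an illustrative assumption, not the actual SVM call site):
 * the AVIC "incomplete IPI" VMEXIT handler would report the ICR halves and
 * the offending APIC id/index with the generated helper:
 *
 *	trace_kvm_avic_incomplete_ipi(vcpu, icrh, icrl, id, index);
 */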
TRACE_EVENT(kvm_avic_unaccelerated_access,
	TP_PROTO(u32 vcpu, u32 offset, bool ft, bool rw, u32 vec),
	TP_ARGS(vcpu, offset, ft, rw, vec),

	TP_STRUCT__entry(
		__field(u32, vcpu)
		__field(u32, offset)
		__field(bool, ft)
		__field(bool, rw)
		__field(u32, vec)
	),

	TP_fast_assign(
		__entry->vcpu = vcpu;
		__entry->offset = offset;
		__entry->ft = ft;
		__entry->rw = rw;
		__entry->vec = vec;
	),

	TP_printk("vcpu=%u, offset=%#x(%s), %s, %s, vec=%#x",
		  __entry->vcpu,
		  __entry->offset,
		  __print_symbolic(__entry->offset, kvm_trace_symbol_apic),
		  __entry->ft ? "trap" : "fault",
		  __entry->rw ? "write" : "read",
		  __entry->vec)
);

TRACE_EVENT(kvm_avic_ga_log,
	TP_PROTO(u32 vmid, u32 vcpuid),
	TP_ARGS(vmid, vcpuid),

	TP_STRUCT__entry(
		__field(u32, vmid)
		__field(u32, vcpuid)
	),

	TP_fast_assign(
		__entry->vmid = vmid;
		__entry->vcpuid = vcpuid;
	),

	TP_printk("vmid=%u, vcpuid=%u",
		  __entry->vmid, __entry->vcpuid)
);

TRACE_EVENT(kvm_hv_timer_state,
	TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use),
	TP_ARGS(vcpu_id, hv_timer_in_use),
	TP_STRUCT__entry(
		__field(unsigned int, vcpu_id)
		__field(unsigned int, hv_timer_in_use)
	),
	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->hv_timer_in_use = hv_timer_in_use;
	),
	TP_printk("vcpu_id %x hv_timer %x",
		  __entry->vcpu_id,
		  __entry->hv_timer_in_use)
);

/*
 * Tracepoint for kvm_hv_flush_tlb.
 */
TRACE_EVENT(kvm_hv_flush_tlb,
	TP_PROTO(u64 processor_mask, u64 address_space, u64 flags),
	TP_ARGS(processor_mask, address_space, flags),

	TP_STRUCT__entry(
		__field(u64, processor_mask)
		__field(u64, address_space)
		__field(u64, flags)
	),

	TP_fast_assign(
		__entry->processor_mask = processor_mask;
		__entry->address_space = address_space;
		__entry->flags = flags;
	),

	TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx",
		  __entry->processor_mask, __entry->address_space,
		  __entry->flags)
);
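/*
 * Usage sketch (an assumption for illustration, not the original call
 * site): the Hyper-V TLB-flush hypercall handler would log the request it
 * parsed from the guest's hypercall input as:
 *
 *	trace_kvm_hv_flush_tlb(processor_mask, address_space, flags);
 */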
/*
 * Tracepoint for kvm_hv_flush_tlb_ex.
 */
TRACE_EVENT(kvm_hv_flush_tlb_ex,
	TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags),
	TP_ARGS(valid_bank_mask, format, address_space, flags),

	TP_STRUCT__entry(
		__field(u64, valid_bank_mask)
		__field(u64, format)
		__field(u64, address_space)
		__field(u64, flags)
	),

	TP_fast_assign(
		__entry->valid_bank_mask = valid_bank_mask;
		__entry->format = format;
		__entry->address_space = address_space;
		__entry->flags = flags;
	),

	TP_printk("valid_bank_mask 0x%llx format 0x%llx "
		  "address_space 0x%llx flags 0x%llx",
		  __entry->valid_bank_mask, __entry->format,
		  __entry->address_space, __entry->flags)
);

/*
 * Tracepoints for kvm_hv_send_ipi.
 */
TRACE_EVENT(kvm_hv_send_ipi,
	TP_PROTO(u32 vector, u64 processor_mask),
	TP_ARGS(vector, processor_mask),

	TP_STRUCT__entry(
		__field(u32, vector)
		__field(u64, processor_mask)
	),

	TP_fast_assign(
		__entry->vector = vector;
		__entry->processor_mask = processor_mask;
	),

	TP_printk("vector %x processor_mask 0x%llx",
		  __entry->vector, __entry->processor_mask)
);

TRACE_EVENT(kvm_hv_send_ipi_ex,
	TP_PROTO(u32 vector, u64 format, u64 valid_bank_mask),
	TP_ARGS(vector, format, valid_bank_mask),

	TP_STRUCT__entry(
		__field(u32, vector)
		__field(u64, format)
		__field(u64, valid_bank_mask)
	),

	TP_fast_assign(
		__entry->vector = vector;
		__entry->format = format;
		__entry->valid_bank_mask = valid_bank_mask;
	),

	TP_printk("vector %x format %llx valid_bank_mask 0x%llx",
		  __entry->vector, __entry->format,
		  __entry->valid_bank_mask)
);
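/*
 * Usage sketch (argument names are illustrative assumptions): the Hyper-V
 * synthetic-IPI hypercall handler would record the vector and the target
 * processor mask it decoded from the hypercall input as:
 *
 *	trace_kvm_hv_send_ipi(vector, processor_mask);
 */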
TRACE_EVENT(kvm_pv_tlb_flush,
	TP_PROTO(unsigned int vcpu_id, bool need_flush_tlb),
	TP_ARGS(vcpu_id, need_flush_tlb),

	TP_STRUCT__entry(
		__field( unsigned int, vcpu_id )
		__field( bool, need_flush_tlb )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->need_flush_tlb = need_flush_tlb;
	),

	TP_printk("vcpu %u need_flush_tlb %s", __entry->vcpu_id,
		  __entry->need_flush_tlb ? "true" : "false")
);

/*
 * Tracepoint for failed nested VMX VM-Enter.
 */
TRACE_EVENT(kvm_nested_vmenter_failed,
	TP_PROTO(const char *msg, u32 err),
	TP_ARGS(msg, err),

	TP_STRUCT__entry(
		__string(msg, msg)
		__field(u32, err)
	),

	TP_fast_assign(
		__assign_str(msg, msg);
		__entry->err = err;
	),

	TP_printk("%s%s", __get_str(msg), !__entry->err ? "" :
		  __print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS))
);

/*
 * Tracepoint for syndbg_set_msr.
 */
TRACE_EVENT(kvm_hv_syndbg_set_msr,
	TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data),
	TP_ARGS(vcpu_id, vp_index, msr, data),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, vp_index)
		__field(u32, msr)
		__field(u64, data)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->vp_index = vp_index;
		__entry->msr = msr;
		__entry->data = data;
	),

	TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx",
		  __entry->vcpu_id, __entry->vp_index, __entry->msr,
		  __entry->data)
);
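/*
 * Usage sketch (the message string is an illustrative assumption): the
 * nested VMX code reports a failed VM-Enter with a human-readable reason
 * plus an optional VM-instruction error, e.g.:
 *
 *	trace_kvm_nested_vmenter_failed("example: bad control field", 0);
 *
 * A non-zero err is decoded via __print_symbolic() against
 * VMX_VMENTER_INSTRUCTION_ERRORS when the event is printed.
 */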
/*
 * Tracepoint for syndbg_get_msr.
 */
TRACE_EVENT(kvm_hv_syndbg_get_msr,
	TP_PROTO(int vcpu_id, u32 vp_index, u32 msr, u64 data),
	TP_ARGS(vcpu_id, vp_index, msr, data),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, vp_index)
		__field(u32, msr)
		__field(u64, data)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->vp_index = vp_index;
		__entry->msr = msr;
		__entry->data = data;
	),

	TP_printk("vcpu_id %d vp_index %u msr 0x%x data 0x%llx",
		  __entry->vcpu_id, __entry->vp_index, __entry->msr,
		  __entry->data)
);
#endif /* _TRACE_KVM_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

/* This part must be outside protection */
#include <trace/define_trace.h>
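/*
 * Note (added for illustration): because TRACE_SYSTEM is "kvm", the events
 * defined above are exposed at runtime under tracefs, e.g.
 * /sys/kernel/tracing/events/kvm/kvm_hv_stimer_expiration/enable, and can
 * also be captured with tools such as trace-cmd or perf.
 */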