#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H

#include <linux/tracepoint.h>
#include <asm/vmx.h>
#include <asm/svm.h>
#include <asm/clocksource.h>
#include <asm/pvclock-abi.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/*
 * Tracepoint for guest mode entry.
 */
TRACE_EVENT(kvm_entry,
	TP_PROTO(unsigned int vcpu_id),
	TP_ARGS(vcpu_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id		)
	),

	TP_fast_assign(
		__entry->vcpu_id	= vcpu_id;
	),

	TP_printk("vcpu %u", __entry->vcpu_id)
);

/*
 * Tracepoint for hypercall.
 */
TRACE_EVENT(kvm_hypercall,
	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
		 unsigned long a2, unsigned long a3),
	TP_ARGS(nr, a0, a1, a2, a3),

	TP_STRUCT__entry(
		__field(	unsigned long,	nr		)
		__field(	unsigned long,	a0		)
		__field(	unsigned long,	a1		)
		__field(	unsigned long,	a2		)
		__field(	unsigned long,	a3		)
	),

	TP_fast_assign(
		__entry->nr		= nr;
		__entry->a0		= a0;
		__entry->a1		= a1;
		__entry->a2		= a2;
		__entry->a3		= a3;
	),

	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
		 __entry->nr, __entry->a0, __entry->a1, __entry->a2,
		 __entry->a3)
);

/*
 * Tracepoint for Hyper-V hypercall.
 */
TRACE_EVENT(kvm_hv_hypercall,
	TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
		 __u64 ingpa, __u64 outgpa),
	TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),

	TP_STRUCT__entry(
		__field(	__u16,		rep_cnt		)
		__field(	__u16,		rep_idx		)
		__field(	__u64,		ingpa		)
		__field(	__u64,		outgpa		)
		__field(	__u16,		code		)
		__field(	bool,		fast		)
	),

	TP_fast_assign(
		__entry->rep_cnt	= rep_cnt;
		__entry->rep_idx	= rep_idx;
		__entry->ingpa		= ingpa;
		__entry->outgpa		= outgpa;
		__entry->code		= code;
		__entry->fast		= fast;
	),

	TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
		  __entry->code, __entry->fast ? "fast" : "slow",
		  __entry->rep_cnt, __entry->rep_idx, __entry->ingpa,
		  __entry->outgpa)
);

/*
 * Tracepoint for PIO.
 */

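/* Direction values for the kvm_pio tracepoint's "rw" argument. */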
#define KVM_PIO_IN   0
#define KVM_PIO_OUT  1

TRACE_EVENT(kvm_pio,
	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
		 unsigned int count, void *data),
	TP_ARGS(rw, port, size, count, data),

	TP_STRUCT__entry(
		__field(	unsigned int,	rw		)
		__field(	unsigned int,	port		)
		__field(	unsigned int,	size		)
		__field(	unsigned int,	count		)
		__field(	unsigned int,	val		)
	),

	TP_fast_assign(
		__entry->rw		= rw;
		__entry->port		= port;
		__entry->size		= size;
		__entry->count		= count;
		if (size == 1)
			__entry->val	= *(unsigned char *)data;
		else if (size == 2)
			__entry->val	= *(unsigned short *)data;
		else
			__entry->val	= *(unsigned int *)data;
	),

	TP_printk("pio_%s at 0x%x size %d count %d val 0x%x %s",
		  __entry->rw ? "write" : "read",
		  __entry->port, __entry->size, __entry->count, __entry->val,
		  __entry->count > 1 ? "(...)" : "")
);

/*
 * Tracepoint for cpuid.
 */
TRACE_EVENT(kvm_cpuid,
	TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
		 unsigned long rcx, unsigned long rdx),
	TP_ARGS(function, rax, rbx, rcx, rdx),

	TP_STRUCT__entry(
		__field(	unsigned int,	function	)
		__field(	unsigned long,	rax		)
		__field(	unsigned long,	rbx		)
		__field(	unsigned long,	rcx		)
		__field(	unsigned long,	rdx		)
	),

	TP_fast_assign(
		__entry->function	= function;
		__entry->rax		= rax;
		__entry->rbx		= rbx;
		__entry->rcx		= rcx;
		__entry->rdx		= rdx;
	),

	TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx",
		  __entry->function, __entry->rax,
		  __entry->rbx, __entry->rcx, __entry->rdx)
);

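/*
 * AREG() pairs an APIC register offset with its name; kvm_trace_symbol_apic
 * is the table handed to __print_symbolic() by the kvm_apic tracepoint.
 */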
#define AREG(x) { APIC_##x, "APIC_" #x }

#define kvm_trace_symbol_apic						    \
	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI),    \
	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR),  \
	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR),   \
	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT),  \
	AREG(ECTRL)
/*
 * Tracepoint for apic access.
 */
TRACE_EVENT(kvm_apic,
	TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
	TP_ARGS(rw, reg, val),

	TP_STRUCT__entry(
		__field(	unsigned int,	rw		)
		__field(	unsigned int,	reg		)
		__field(	unsigned int,	val		)
	),

	TP_fast_assign(
		__entry->rw		= rw;
		__entry->reg		= reg;
		__entry->val		= val;
	),

	TP_printk("apic_%s %s = 0x%x",
		  __entry->rw ? "write" : "read",
		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
		  __entry->val)
);

#define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
#define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)

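/*
 * ISA tags passed to the exit tracepoints below; they select whether the
 * exit reason is decoded with VMX_EXIT_REASONS or SVM_EXIT_REASONS.
 */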
#define KVM_ISA_VMX   1
#define KVM_ISA_SVM   2

/*
 * Tracepoint for kvm guest exit:
 */
TRACE_EVENT(kvm_exit,
	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
	TP_ARGS(exit_reason, vcpu, isa),

	TP_STRUCT__entry(
		__field(	unsigned int,	exit_reason	)
		__field(	unsigned long,	guest_rip	)
		__field(	u32,		isa		)
		__field(	u64,		info1		)
		__field(	u64,		info2		)
	),

	TP_fast_assign(
		__entry->exit_reason	= exit_reason;
		__entry->guest_rip	= kvm_rip_read(vcpu);
		__entry->isa		= isa;
		kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
					   &__entry->info2);
	),

	TP_printk("reason %s rip 0x%lx info %llx %llx",
		 (__entry->isa == KVM_ISA_VMX) ?
		 __print_symbolic(__entry->exit_reason, VMX_EXIT_REASONS) :
		 __print_symbolic(__entry->exit_reason, SVM_EXIT_REASONS),
		 __entry->guest_rip, __entry->info1, __entry->info2)
);

/*
 * Tracepoint for kvm interrupt injection:
 */
TRACE_EVENT(kvm_inj_virq,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_STRUCT__entry(
		__field(	unsigned int,	irq		)
	),

	TP_fast_assign(
		__entry->irq		= irq;
	),

	TP_printk("irq %u", __entry->irq)
);

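/*
 * EXS() pairs an exception vector with its mnemonic; kvm_trace_sym_exc is
 * the table used by __print_symbolic() in kvm_inj_exception.
 */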
#define EXS(x) { x##_VECTOR, "#" #x }

#define kvm_trace_sym_exc						\
	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
	EXS(MF), EXS(AC), EXS(MC)

/*
 * Tracepoint for kvm exception injection:
 */
TRACE_EVENT(kvm_inj_exception,
	TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
	TP_ARGS(exception, has_error, error_code),

	TP_STRUCT__entry(
		__field(	u8,	exception	)
		__field(	u8,	has_error	)
		__field(	u32,	error_code	)
	),

	TP_fast_assign(
		__entry->exception	= exception;
		__entry->has_error	= has_error;
		__entry->error_code	= error_code;
	),

	TP_printk("%s (0x%x)",
		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
		  /* FIXME: don't print error_code if not present */
		  __entry->has_error ? __entry->error_code : 0)
);

/*
 * Tracepoint for page fault.
 */
TRACE_EVENT(kvm_page_fault,
	TP_PROTO(unsigned long fault_address, unsigned int error_code),
	TP_ARGS(fault_address, error_code),

	TP_STRUCT__entry(
		__field(	unsigned long,	fault_address	)
		__field(	unsigned int,	error_code	)
	),

	TP_fast_assign(
		__entry->fault_address	= fault_address;
		__entry->error_code	= error_code;
	),

	TP_printk("address %lx error_code %x",
		  __entry->fault_address, __entry->error_code)
);

/*
 * Tracepoint for guest MSR access.
 */
TRACE_EVENT(kvm_msr,
	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
	TP_ARGS(write, ecx, data, exception),

	TP_STRUCT__entry(
		__field(	unsigned,	write		)
		__field(	u32,		ecx		)
		__field(	u64,		data		)
		__field(	u8,		exception	)
	),

	TP_fast_assign(
		__entry->write		= write;
		__entry->ecx		= ecx;
		__entry->data		= data;
		__entry->exception	= exception;
	),

	TP_printk("msr_%s %x = 0x%llx%s",
		  __entry->write ? "write" : "read",
		  __entry->ecx, __entry->data,
		  __entry->exception ? " (#GP)" : "")
);

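/*
 * Convenience wrappers for the common MSR cases.  Illustrative use only:
 * a successful RDMSR emulation would be logged with
 * trace_kvm_msr_read(ecx, data), one that raises #GP with
 * trace_kvm_msr_read_ex(ecx).
 */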
#define trace_kvm_msr_read(ecx, data)      trace_kvm_msr(0, ecx, data, false)
#define trace_kvm_msr_write(ecx, data)     trace_kvm_msr(1, ecx, data, false)
#define trace_kvm_msr_read_ex(ecx)         trace_kvm_msr(0, ecx, 0, true)
#define trace_kvm_msr_write_ex(ecx, data)  trace_kvm_msr(1, ecx, data, true)

/*
 * Tracepoint for guest CR access.
 */
TRACE_EVENT(kvm_cr,
	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
	TP_ARGS(rw, cr, val),

	TP_STRUCT__entry(
		__field(	unsigned int,	rw		)
		__field(	unsigned int,	cr		)
		__field(	unsigned long,	val		)
	),

	TP_fast_assign(
		__entry->rw		= rw;
		__entry->cr		= cr;
		__entry->val		= val;
	),

	TP_printk("cr_%s %x = 0x%lx",
		  __entry->rw ? "write" : "read",
		  __entry->cr, __entry->val)
);

#define trace_kvm_cr_read(cr, val)		trace_kvm_cr(0, cr, val)
#define trace_kvm_cr_write(cr, val)		trace_kvm_cr(1, cr, val)

TRACE_EVENT(kvm_pic_set_irq,
	    TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
	    TP_ARGS(chip, pin, elcr, imr, coalesced),

	TP_STRUCT__entry(
		__field(	__u8,		chip		)
		__field(	__u8,		pin		)
		__field(	__u8,		elcr		)
		__field(	__u8,		imr		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->chip		= chip;
		__entry->pin		= pin;
		__entry->elcr		= elcr;
		__entry->imr		= imr;
		__entry->coalesced	= coalesced;
	),

	TP_printk("chip %u pin %u (%s%s)%s",
		  __entry->chip, __entry->pin,
		  (__entry->elcr & (1 << __entry->pin)) ? "level":"edge",
		  (__entry->imr & (1 << __entry->pin)) ? "|masked":"",
		  __entry->coalesced ? " (coalesced)" : "")
);

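/*
 * Names for the ICR destination shorthand field (bits 18-19), decoded by
 * the kvm_apic_ipi tracepoint below.
 */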
#define kvm_apic_dst_shorthand		\
	{0x0, "dst"},			\
	{0x1, "self"},			\
	{0x2, "all"},			\
	{0x3, "all-but-self"}

TRACE_EVENT(kvm_apic_ipi,
	    TP_PROTO(__u32 icr_low, __u32 dest_id),
	    TP_ARGS(icr_low, dest_id),

	TP_STRUCT__entry(
		__field(	__u32,		icr_low		)
		__field(	__u32,		dest_id		)
	),

	TP_fast_assign(
		__entry->icr_low	= icr_low;
		__entry->dest_id	= dest_id;
	),

	TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
		  __entry->dest_id, (u8)__entry->icr_low,
		  __print_symbolic((__entry->icr_low >> 8 & 0x7),
				   kvm_deliver_mode),
		  (__entry->icr_low & (1<<11)) ? "logical" : "physical",
		  (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
		  (__entry->icr_low & (1<<15)) ? "level" : "edge",
		  __print_symbolic((__entry->icr_low >> 18 & 0x3),
				   kvm_apic_dst_shorthand))
);

TRACE_EVENT(kvm_apic_accept_irq,
	    TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec),
	    TP_ARGS(apicid, dm, tm, vec),

	TP_STRUCT__entry(
		__field(	__u32,		apicid		)
		__field(	__u16,		dm		)
		__field(	__u8,		tm		)
		__field(	__u8,		vec		)
	),

	TP_fast_assign(
		__entry->apicid		= apicid;
		__entry->dm		= dm;
		__entry->tm		= tm;
		__entry->vec		= vec;
	),

	TP_printk("apicid %x vec %u (%s|%s)",
		  __entry->apicid, __entry->vec,
		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
		  __entry->tm ? "level" : "edge")
);

TRACE_EVENT(kvm_eoi,
	    TP_PROTO(struct kvm_lapic *apic, int vector),
	    TP_ARGS(apic, vector),

	TP_STRUCT__entry(
		__field(	__u32,		apicid		)
		__field(	int,		vector		)
	),

	TP_fast_assign(
		__entry->apicid		= apic->vcpu->vcpu_id;
		__entry->vector		= vector;
	),

	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
);

TRACE_EVENT(kvm_pv_eoi,
	    TP_PROTO(struct kvm_lapic *apic, int vector),
	    TP_ARGS(apic, vector),

	TP_STRUCT__entry(
		__field(	__u32,		apicid		)
		__field(	int,		vector		)
	),

	TP_fast_assign(
		__entry->apicid		= apic->vcpu->vcpu_id;
		__entry->vector		= vector;
	),

	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
);

/*
 * Tracepoint for nested VMRUN
 */
TRACE_EVENT(kvm_nested_vmrun,
	    TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
		     __u32 event_inj, bool npt),
	    TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),

	TP_STRUCT__entry(
		__field(	__u64,		rip		)
		__field(	__u64,		vmcb		)
		__field(	__u64,		nested_rip	)
		__field(	__u32,		int_ctl		)
		__field(	__u32,		event_inj	)
		__field(	bool,		npt		)
	),

	TP_fast_assign(
		__entry->rip		= rip;
		__entry->vmcb		= vmcb;
		__entry->nested_rip	= nested_rip;
		__entry->int_ctl	= int_ctl;
		__entry->event_inj	= event_inj;
		__entry->npt		= npt;
	),

	TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
		  "event_inj: 0x%08x npt: %s",
		__entry->rip, __entry->vmcb, __entry->nested_rip,
		__entry->int_ctl, __entry->event_inj,
		__entry->npt ? "on" : "off")
);

TRACE_EVENT(kvm_nested_intercepts,
	    TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
	    TP_ARGS(cr_read, cr_write, exceptions, intercept),

	TP_STRUCT__entry(
		__field(	__u16,		cr_read		)
		__field(	__u16,		cr_write	)
		__field(	__u32,		exceptions	)
		__field(	__u64,		intercept	)
	),

	TP_fast_assign(
		__entry->cr_read	= cr_read;
		__entry->cr_write	= cr_write;
		__entry->exceptions	= exceptions;
		__entry->intercept	= intercept;
	),

	TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
		__entry->cr_read, __entry->cr_write, __entry->exceptions,
		__entry->intercept)
);

/*
 * Tracepoint for #VMEXIT while nested
 */
TRACE_EVENT(kvm_nested_vmexit,
	    TP_PROTO(__u64 rip, __u32 exit_code,
		     __u64 exit_info1, __u64 exit_info2,
		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
	    TP_ARGS(rip, exit_code, exit_info1, exit_info2,
		    exit_int_info, exit_int_info_err, isa),

	TP_STRUCT__entry(
		__field(	__u64,		rip			)
		__field(	__u32,		exit_code		)
		__field(	__u64,		exit_info1		)
		__field(	__u64,		exit_info2		)
		__field(	__u32,		exit_int_info		)
		__field(	__u32,		exit_int_info_err	)
		__field(	__u32,		isa			)
	),

	TP_fast_assign(
		__entry->rip			= rip;
		__entry->exit_code		= exit_code;
		__entry->exit_info1		= exit_info1;
		__entry->exit_info2		= exit_info2;
		__entry->exit_int_info		= exit_int_info;
		__entry->exit_int_info_err	= exit_int_info_err;
		__entry->isa			= isa;
	),

	TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx "
		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
		  __entry->rip,
		 (__entry->isa == KVM_ISA_VMX) ?
		 __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
		 __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
		  __entry->exit_info1, __entry->exit_info2,
		  __entry->exit_int_info, __entry->exit_int_info_err)
);

/*
 * Tracepoint for #VMEXIT reinjected to the guest
 */
TRACE_EVENT(kvm_nested_vmexit_inject,
	    TP_PROTO(__u32 exit_code,
		     __u64 exit_info1, __u64 exit_info2,
		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
	    TP_ARGS(exit_code, exit_info1, exit_info2,
		    exit_int_info, exit_int_info_err, isa),

	TP_STRUCT__entry(
		__field(	__u32,		exit_code		)
		__field(	__u64,		exit_info1		)
		__field(	__u64,		exit_info2		)
		__field(	__u32,		exit_int_info		)
		__field(	__u32,		exit_int_info_err	)
		__field(	__u32,		isa			)
	),

	TP_fast_assign(
		__entry->exit_code		= exit_code;
		__entry->exit_info1		= exit_info1;
		__entry->exit_info2		= exit_info2;
		__entry->exit_int_info		= exit_int_info;
		__entry->exit_int_info_err	= exit_int_info_err;
		__entry->isa			= isa;
	),

	TP_printk("reason: %s ext_inf1: 0x%016llx "
		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
		 (__entry->isa == KVM_ISA_VMX) ?
		 __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
		 __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
		__entry->exit_info1, __entry->exit_info2,
		__entry->exit_int_info, __entry->exit_int_info_err)
);

/*
 * Tracepoint for nested #vmexit because of interrupt pending
 */
TRACE_EVENT(kvm_nested_intr_vmexit,
	    TP_PROTO(__u64 rip),
	    TP_ARGS(rip),

	TP_STRUCT__entry(
		__field(	__u64,	rip	)
	),

	TP_fast_assign(
		__entry->rip	= rip;
	),

	TP_printk("rip: 0x%016llx", __entry->rip)
);

/*
 * Tracepoint for the INVLPGA instruction
 */
TRACE_EVENT(kvm_invlpga,
	    TP_PROTO(__u64 rip, int asid, u64 address),
	    TP_ARGS(rip, asid, address),

	TP_STRUCT__entry(
		__field(	__u64,	rip	)
		__field(	int,	asid	)
		__field(	__u64,	address	)
	),

	TP_fast_assign(
		__entry->rip		=	rip;
		__entry->asid		=	asid;
		__entry->address	=	address;
	),

	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
		  __entry->rip, __entry->asid, __entry->address)
);

/*
 * Tracepoint for the SKINIT instruction
 */
TRACE_EVENT(kvm_skinit,
	    TP_PROTO(__u64 rip, __u32 slb),
	    TP_ARGS(rip, slb),

	TP_STRUCT__entry(
		__field(	__u64,	rip	)
		__field(	__u32,	slb	)
	),

	TP_fast_assign(
		__entry->rip		=	rip;
		__entry->slb		=	slb;
	),

	TP_printk("rip: 0x%016llx slb: 0x%08x",
		  __entry->rip, __entry->slb)
);

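/*
 * Flag bits describing the CPU mode in which an instruction was emulated;
 * kvm_trace_symbol_emul_flags below turns them into a mode name for the
 * kvm_emulate_insn tracepoint.
 */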
#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
#define KVM_EMUL_INSN_F_CS_L   (1 << 3)

#define kvm_trace_symbol_emul_flags	                  \
	{ 0,   			    "real" },		  \
	{ KVM_EMUL_INSN_F_CR0_PE			  \
	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" },		  \
	{ KVM_EMUL_INSN_F_CR0_PE,   "prot16" },		  \
	{ KVM_EMUL_INSN_F_CR0_PE			  \
	  | KVM_EMUL_INSN_F_CS_D,   "prot32" },		  \
	{ KVM_EMUL_INSN_F_CR0_PE			  \
	  | KVM_EMUL_INSN_F_CS_L,   "prot64" }

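/*
 * kei_decode_mode() maps an x86 emulator mode to the flag encoding above
 * (0xff if the mode is not recognized).  For example,
 * kei_decode_mode(X86EMUL_MODE_PROT64) evaluates to
 * KVM_EMUL_INSN_F_CR0_PE | KVM_EMUL_INSN_F_CS_L, which
 * kvm_trace_symbol_emul_flags prints as "prot64".
 */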
#define kei_decode_mode(mode) ({			\
	u8 flags = 0xff;				\
	switch (mode) {					\
	case X86EMUL_MODE_REAL:				\
		flags = 0;				\
		break;					\
	case X86EMUL_MODE_VM86:				\
		flags = KVM_EMUL_INSN_F_EFL_VM;		\
		break;					\
	case X86EMUL_MODE_PROT16:			\
		flags = KVM_EMUL_INSN_F_CR0_PE;		\
		break;					\
	case X86EMUL_MODE_PROT32:			\
		flags = KVM_EMUL_INSN_F_CR0_PE		\
			| KVM_EMUL_INSN_F_CS_D;		\
		break;					\
	case X86EMUL_MODE_PROT64:			\
		flags = KVM_EMUL_INSN_F_CR0_PE		\
			| KVM_EMUL_INSN_F_CS_L;		\
		break;					\
	}						\
	flags;						\
	})

TRACE_EVENT(kvm_emulate_insn,
	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
	TP_ARGS(vcpu, failed),

	TP_STRUCT__entry(
		__field(    __u64, rip                       )
		__field(    __u32, csbase                    )
		__field(    __u8,  len                       )
		__array(    __u8,  insn,    15               )
		__field(    __u8,  flags                     )
		__field(    __u8,  failed                    )
		),

	TP_fast_assign(
		__entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
		__entry->len = vcpu->arch.emulate_ctxt.fetch.ptr
			       - vcpu->arch.emulate_ctxt.fetch.data;
		__entry->rip = vcpu->arch.emulate_ctxt._eip - __entry->len;
		memcpy(__entry->insn,
		       vcpu->arch.emulate_ctxt.fetch.data,
		       15);
		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
		__entry->failed = failed;
		),

	TP_printk("%x:%llx:%s (%s)%s",
		  __entry->csbase, __entry->rip,
		  __print_hex(__entry->insn, __entry->len),
		  __print_symbolic(__entry->flags,
				   kvm_trace_symbol_emul_flags),
		  __entry->failed ? " failed" : ""
		)
	);

#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)

TRACE_EVENT(
	vcpu_match_mmio,
	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
	TP_ARGS(gva, gpa, write, gpa_match),

	TP_STRUCT__entry(
		__field(gva_t, gva)
		__field(gpa_t, gpa)
		__field(bool, write)
		__field(bool, gpa_match)
		),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gpa = gpa;
		__entry->write = write;
		__entry->gpa_match = gpa_match;
		),

	TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
		  __entry->write ? "Write" : "Read",
		  __entry->gpa_match ? "GPA" : "GVA")
);

TRACE_EVENT(kvm_write_tsc_offset,
	TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset,
		 __u64 next_tsc_offset),
	TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset),

	TP_STRUCT__entry(
		__field( unsigned int,	vcpu_id				)
		__field(	__u64,	previous_tsc_offset		)
		__field(	__u64,	next_tsc_offset			)
	),

	TP_fast_assign(
		__entry->vcpu_id		= vcpu_id;
		__entry->previous_tsc_offset	= previous_tsc_offset;
		__entry->next_tsc_offset	= next_tsc_offset;
	),

	TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id,
		  __entry->previous_tsc_offset, __entry->next_tsc_offset)
);

#ifdef CONFIG_X86_64

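/*
 * Symbolic names for the host clocksource's vclock mode, printed by the
 * master clock tracepoints below.
 */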
#define host_clocks					\
	{VCLOCK_NONE, "none"},				\
	{VCLOCK_TSC,  "tsc"},				\
	{VCLOCK_HPET, "hpet"}

TRACE_EVENT(kvm_update_master_clock,
	TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
	TP_ARGS(use_master_clock, host_clock, offset_matched),

	TP_STRUCT__entry(
		__field(		bool,	use_master_clock	)
		__field(	unsigned int,	host_clock		)
		__field(		bool,	offset_matched		)
	),

	TP_fast_assign(
		__entry->use_master_clock	= use_master_clock;
		__entry->host_clock		= host_clock;
		__entry->offset_matched		= offset_matched;
	),

	TP_printk("masterclock %d hostclock %s offsetmatched %u",
		  __entry->use_master_clock,
		  __print_symbolic(__entry->host_clock, host_clocks),
		  __entry->offset_matched)
);

TRACE_EVENT(kvm_track_tsc,
	TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched,
		 unsigned int online_vcpus, bool use_master_clock,
		 unsigned int host_clock),
	TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock,
		host_clock),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id			)
		__field(	unsigned int,	nr_vcpus_matched_tsc	)
		__field(	unsigned int,	online_vcpus		)
		__field(	bool,		use_master_clock	)
		__field(	unsigned int,	host_clock		)
	),

	TP_fast_assign(
		__entry->vcpu_id		= vcpu_id;
		__entry->nr_vcpus_matched_tsc	= nr_matched;
		__entry->online_vcpus		= online_vcpus;
		__entry->use_master_clock	= use_master_clock;
		__entry->host_clock		= host_clock;
	),

	TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u"
		  " hostclock %s",
		  __entry->vcpu_id, __entry->use_master_clock,
		  __entry->nr_vcpus_matched_tsc, __entry->online_vcpus,
		  __print_symbolic(__entry->host_clock, host_clocks))
);

#endif /* CONFIG_X86_64 */

/*
 * Tracepoint for PML full VMEXIT.
 */
TRACE_EVENT(kvm_pml_full,
	TP_PROTO(unsigned int vcpu_id),
	TP_ARGS(vcpu_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id			)
	),

	TP_fast_assign(
		__entry->vcpu_id		= vcpu_id;
	),

	TP_printk("vcpu %d: PML full", __entry->vcpu_id)
);

TRACE_EVENT(kvm_ple_window,
	TP_PROTO(bool grow, unsigned int vcpu_id, int new, int old),
	TP_ARGS(grow, vcpu_id, new, old),

	TP_STRUCT__entry(
		__field(                bool,      grow         )
		__field(        unsigned int,   vcpu_id         )
		__field(                 int,       new         )
		__field(                 int,       old         )
	),

	TP_fast_assign(
		__entry->grow           = grow;
		__entry->vcpu_id        = vcpu_id;
		__entry->new            = new;
		__entry->old            = old;
	),

	TP_printk("vcpu %u: ple_window %d (%s %d)",
	          __entry->vcpu_id,
	          __entry->new,
	          __entry->grow ? "grow" : "shrink",
	          __entry->old)
);

#define trace_kvm_ple_window_grow(vcpu_id, new, old) \
	trace_kvm_ple_window(true, vcpu_id, new, old)
#define trace_kvm_ple_window_shrink(vcpu_id, new, old) \
	trace_kvm_ple_window(false, vcpu_id, new, old)

TRACE_EVENT(kvm_pvclock_update,
	TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock),
	TP_ARGS(vcpu_id, pvclock),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id			)
		__field(	__u32,		version			)
		__field(	__u64,		tsc_timestamp		)
		__field(	__u64,		system_time		)
		__field(	__u32,		tsc_to_system_mul	)
		__field(	__s8,		tsc_shift		)
		__field(	__u8,		flags			)
	),

	TP_fast_assign(
		__entry->vcpu_id	   = vcpu_id;
		__entry->version	   = pvclock->version;
		__entry->tsc_timestamp	   = pvclock->tsc_timestamp;
		__entry->system_time	   = pvclock->system_time;
		__entry->tsc_to_system_mul = pvclock->tsc_to_system_mul;
		__entry->tsc_shift	   = pvclock->tsc_shift;
		__entry->flags		   = pvclock->flags;
	),

	TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, "
		  "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, "
		  "flags 0x%x }",
		  __entry->vcpu_id,
		  __entry->version,
		  __entry->tsc_timestamp,
		  __entry->system_time,
		  __entry->tsc_to_system_mul,
		  __entry->tsc_shift,
		  __entry->flags)
);

TRACE_EVENT(kvm_wait_lapic_expire,
	TP_PROTO(unsigned int vcpu_id, s64 delta),
	TP_ARGS(vcpu_id, delta),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id		)
		__field(	s64,		delta		)
	),

	TP_fast_assign(
		__entry->vcpu_id	   = vcpu_id;
		__entry->delta             = delta;
	),

	TP_printk("vcpu %u: delta %lld (%s)",
		  __entry->vcpu_id,
		  __entry->delta,
		  __entry->delta < 0 ? "early" : "late")
);

#endif /* _TRACE_KVM_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

/* This part must be outside protection */
#include <trace/define_trace.h>