#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/*
 * Tracepoint for guest mode entry.
 */
TRACE_EVENT(kvm_entry,
	TP_PROTO(unsigned int vcpu_id),
	TP_ARGS(vcpu_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id		)
	),

	TP_fast_assign(
		__entry->vcpu_id	= vcpu_id;
	),

	TP_printk("vcpu %u", __entry->vcpu_id)
);

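/*
 * Usage sketch: TRACE_EVENT(kvm_entry, ...) above generates a
 * trace_kvm_entry() helper that the rest of KVM calls. The VM-entry path
 * logs the vCPU id just before switching to guest mode, roughly
 * (the exact call site, e.g. vcpu_enter_guest() in x86.c, is indicative):
 *
 *	trace_kvm_entry(vcpu->vcpu_id);
 */
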
/*
 * Tracepoint for hypercall.
 */
TRACE_EVENT(kvm_hypercall,
	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
		 unsigned long a2, unsigned long a3),
	TP_ARGS(nr, a0, a1, a2, a3),

	TP_STRUCT__entry(
		__field(	unsigned long,	nr		)
		__field(	unsigned long,	a0		)
		__field(	unsigned long,	a1		)
		__field(	unsigned long,	a2		)
		__field(	unsigned long,	a3		)
	),

	TP_fast_assign(
		__entry->nr		= nr;
		__entry->a0		= a0;
		__entry->a1		= a1;
		__entry->a2		= a2;
		__entry->a3		= a3;
	),

	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
		 __entry->nr, __entry->a0, __entry->a1, __entry->a2,
		 __entry->a3)
);

/*
 * Tracepoint for Hyper-V hypercall.
 */
TRACE_EVENT(kvm_hv_hypercall,
	TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
		 __u64 ingpa, __u64 outgpa),
	TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),

	TP_STRUCT__entry(
		__field(	__u16,		code		)
		__field(	bool,		fast		)
		__field(	__u16,		rep_cnt		)
		__field(	__u16,		rep_idx		)
		__field(	__u64,		ingpa		)
		__field(	__u64,		outgpa		)
	),

	TP_fast_assign(
		__entry->code		= code;
		__entry->fast		= fast;
		__entry->rep_cnt	= rep_cnt;
		__entry->rep_idx	= rep_idx;
		__entry->ingpa		= ingpa;
		__entry->outgpa		= outgpa;
	),

	TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
		  __entry->code, __entry->fast ? "fast" : "slow",
		  __entry->rep_cnt, __entry->rep_idx, __entry->ingpa,
		  __entry->outgpa)
);

/*
 * Tracepoint for PIO.
 */
TRACE_EVENT(kvm_pio,
	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
		 unsigned int count),
	TP_ARGS(rw, port, size, count),

	TP_STRUCT__entry(
		__field(	unsigned int,	rw		)
		__field(	unsigned int,	port		)
		__field(	unsigned int,	size		)
		__field(	unsigned int,	count		)
	),

	TP_fast_assign(
		__entry->rw		= rw;
		__entry->port		= port;
		__entry->size		= size;
		__entry->count		= count;
	),

	TP_printk("pio_%s at 0x%x size %d count %d",
		  __entry->rw ? "write" : "read",
		  __entry->port, __entry->size, __entry->count)
);

/*
 * Tracepoint for cpuid.
 */
TRACE_EVENT(kvm_cpuid,
	TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
		 unsigned long rcx, unsigned long rdx),
	TP_ARGS(function, rax, rbx, rcx, rdx),

	TP_STRUCT__entry(
		__field(	unsigned int,	function	)
		__field(	unsigned long,	rax		)
		__field(	unsigned long,	rbx		)
		__field(	unsigned long,	rcx		)
		__field(	unsigned long,	rdx		)
	),

	TP_fast_assign(
		__entry->function	= function;
		__entry->rax		= rax;
		__entry->rbx		= rbx;
		__entry->rcx		= rcx;
		__entry->rdx		= rdx;
	),

	TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx",
		  __entry->function, __entry->rax,
		  __entry->rbx, __entry->rcx, __entry->rdx)
);

#define AREG(x) { APIC_##x, "APIC_" #x }

#define kvm_trace_symbol_apic						    \
	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI),    \
	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR),  \
	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR),   \
	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT),  \
	AREG(ECTRL)

/*
 * Tracepoint for apic access.
 */
TRACE_EVENT(kvm_apic,
	TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
	TP_ARGS(rw, reg, val),

	TP_STRUCT__entry(
		__field(	unsigned int,	rw		)
		__field(	unsigned int,	reg		)
		__field(	unsigned int,	val		)
	),

	TP_fast_assign(
		__entry->rw		= rw;
		__entry->reg		= reg;
		__entry->val		= val;
	),

	TP_printk("apic_%s %s = 0x%x",
		  __entry->rw ? "write" : "read",
		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
		  __entry->val)
);

#define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
#define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)

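/*
 * Usage sketch: the trace_kvm_apic_read()/trace_kvm_apic_write() wrappers
 * above fold the access direction into the first argument, so the local
 * APIC emulation can log register accesses as, for example
 * (call sites are indicative only):
 *
 *	trace_kvm_apic_write(reg, val);
 *	trace_kvm_apic_read(offset, result);
 *
 * __print_symbolic() then renders the register offset using the
 * kvm_trace_symbol_apic table defined above.
 */
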
/*
 * Tracepoint for kvm guest exit:
 */
TRACE_EVENT(kvm_exit,
	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu),
	TP_ARGS(exit_reason, vcpu),

	TP_STRUCT__entry(
		__field(	unsigned int,	exit_reason	)
		__field(	unsigned long,	guest_rip	)
	),

	TP_fast_assign(
		__entry->exit_reason	= exit_reason;
		__entry->guest_rip	= kvm_rip_read(vcpu);
	),

	TP_printk("reason %s rip 0x%lx",
		 ftrace_print_symbols_seq(p, __entry->exit_reason,
					  kvm_x86_ops->exit_reasons_str),
		 __entry->guest_rip)
);

/*
 * Tracepoint for kvm interrupt injection:
 */
TRACE_EVENT(kvm_inj_virq,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_STRUCT__entry(
		__field(	unsigned int,	irq		)
	),

	TP_fast_assign(
		__entry->irq		= irq;
	),

	TP_printk("irq %u", __entry->irq)
);

#define EXS(x) { x##_VECTOR, "#" #x }

#define kvm_trace_sym_exc						\
	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
	EXS(MF), EXS(MC)

/*
 * Tracepoint for kvm exception injection:
 */
TRACE_EVENT(kvm_inj_exception,
	TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
	TP_ARGS(exception, has_error, error_code),

	TP_STRUCT__entry(
		__field(	u8,	exception	)
		__field(	u8,	has_error	)
		__field(	u32,	error_code	)
	),

	TP_fast_assign(
		__entry->exception	= exception;
		__entry->has_error	= has_error;
		__entry->error_code	= error_code;
	),

	TP_printk("%s (0x%x)",
		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
		  __entry->has_error ? __entry->error_code : 0)
);

/*
 * Tracepoint for page fault.
 */
TRACE_EVENT(kvm_page_fault,
	TP_PROTO(unsigned long fault_address, unsigned int error_code),
	TP_ARGS(fault_address, error_code),

	TP_STRUCT__entry(
		__field(	unsigned long,	fault_address	)
		__field(	unsigned int,	error_code	)
	),

	TP_fast_assign(
		__entry->fault_address	= fault_address;
		__entry->error_code	= error_code;
	),

	TP_printk("address %lx error_code %x",
		  __entry->fault_address, __entry->error_code)
);

/*
 * Tracepoint for guest MSR access.
 */
TRACE_EVENT(kvm_msr,
	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
	TP_ARGS(write, ecx, data, exception),

	TP_STRUCT__entry(
		__field(	unsigned,	write		)
		__field(	u32,		ecx		)
		__field(	u64,		data		)
		__field(	u8,		exception	)
	),

	TP_fast_assign(
		__entry->write		= write;
		__entry->ecx		= ecx;
		__entry->data		= data;
		__entry->exception	= exception;
	),

	TP_printk("msr_%s %x = 0x%llx%s",
		  __entry->write ? "write" : "read",
		  __entry->ecx, __entry->data,
		  __entry->exception ? " (#GP)" : "")
);

#define trace_kvm_msr_read(ecx, data)      trace_kvm_msr(0, ecx, data, false)
#define trace_kvm_msr_write(ecx, data)     trace_kvm_msr(1, ecx, data, false)
#define trace_kvm_msr_read_ex(ecx)         trace_kvm_msr(0, ecx, 0, true)
#define trace_kvm_msr_write_ex(ecx, data)  trace_kvm_msr(1, ecx, data, true)

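/*
 * Usage sketch: the wrappers above encode both the direction and whether
 * the access faulted. A successful RDMSR/WRMSR is logged as
 * trace_kvm_msr_read(ecx, data) / trace_kvm_msr_write(ecx, data), while an
 * access that raised #GP goes through the _ex variants, which is what the
 * " (#GP)" suffix in TP_printk reports.
 */
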
/*
 * Tracepoint for guest CR access.
 */
TRACE_EVENT(kvm_cr,
	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
	TP_ARGS(rw, cr, val),

	TP_STRUCT__entry(
		__field(	unsigned int,	rw		)
		__field(	unsigned int,	cr		)
		__field(	unsigned long,	val		)
	),

	TP_fast_assign(
		__entry->rw		= rw;
		__entry->cr		= cr;
		__entry->val		= val;
	),

	TP_printk("cr_%s %x = 0x%lx",
		  __entry->rw ? "write" : "read",
		  __entry->cr, __entry->val)
);

#define trace_kvm_cr_read(cr, val)		trace_kvm_cr(0, cr, val)
#define trace_kvm_cr_write(cr, val)		trace_kvm_cr(1, cr, val)

TRACE_EVENT(kvm_pic_set_irq,
	    TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
	    TP_ARGS(chip, pin, elcr, imr, coalesced),

	TP_STRUCT__entry(
		__field(	__u8,		chip		)
		__field(	__u8,		pin		)
		__field(	__u8,		elcr		)
		__field(	__u8,		imr		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->chip		= chip;
		__entry->pin		= pin;
		__entry->elcr		= elcr;
		__entry->imr		= imr;
		__entry->coalesced	= coalesced;
	),

	TP_printk("chip %u pin %u (%s%s)%s",
		  __entry->chip, __entry->pin,
		  (__entry->elcr & (1 << __entry->pin)) ? "level" : "edge",
		  (__entry->imr & (1 << __entry->pin)) ? "|masked" : "",
		  __entry->coalesced ? " (coalesced)" : "")
);

#define kvm_apic_dst_shorthand		\
	{0x0, "dst"},			\
	{0x1, "self"},			\
	{0x2, "all"},			\
	{0x3, "all-but-self"}

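/*
 * Tracepoint for an IPI requested through the local APIC ICR. TP_printk
 * decodes the low ICR word: bits 7:0 vector, bits 10:8 delivery mode
 * (rendered via kvm_deliver_mode), bit 11 logical/physical destination,
 * bit 14 assert/de-assert, bit 15 trigger mode, and bits 19:18 the
 * destination shorthand from kvm_apic_dst_shorthand above.
 */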
TRACE_EVENT(kvm_apic_ipi,
	    TP_PROTO(__u32 icr_low, __u32 dest_id),
	    TP_ARGS(icr_low, dest_id),

	TP_STRUCT__entry(
		__field(	__u32,		icr_low		)
		__field(	__u32,		dest_id		)
	),

	TP_fast_assign(
		__entry->icr_low	= icr_low;
		__entry->dest_id	= dest_id;
	),

	TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
		  __entry->dest_id, (u8)__entry->icr_low,
		  __print_symbolic((__entry->icr_low >> 8 & 0x7),
				   kvm_deliver_mode),
		  (__entry->icr_low & (1<<11)) ? "logical" : "physical",
		  (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
		  (__entry->icr_low & (1<<15)) ? "level" : "edge",
		  __print_symbolic((__entry->icr_low >> 18 & 0x3),
				   kvm_apic_dst_shorthand))
);

TRACE_EVENT(kvm_apic_accept_irq,
	    TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec, bool coalesced),
	    TP_ARGS(apicid, dm, tm, vec, coalesced),

	TP_STRUCT__entry(
		__field(	__u32,		apicid		)
		__field(	__u16,		dm		)
		__field(	__u8,		tm		)
		__field(	__u8,		vec		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->apicid		= apicid;
		__entry->dm		= dm;
		__entry->tm		= tm;
		__entry->vec		= vec;
		__entry->coalesced	= coalesced;
	),

	TP_printk("apicid %x vec %u (%s|%s)%s",
		  __entry->apicid, __entry->vec,
		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
		  __entry->tm ? "level" : "edge",
		  __entry->coalesced ? " (coalesced)" : "")
);

/*
 * Tracepoint for nested VMRUN
 */
TRACE_EVENT(kvm_nested_vmrun,
	    TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
		     __u32 event_inj, bool npt),
	    TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),

	TP_STRUCT__entry(
		__field(	__u64,		rip		)
		__field(	__u64,		vmcb		)
		__field(	__u64,		nested_rip	)
		__field(	__u32,		int_ctl		)
		__field(	__u32,		event_inj	)
		__field(	bool,		npt		)
	),

	TP_fast_assign(
		__entry->rip		= rip;
		__entry->vmcb		= vmcb;
		__entry->nested_rip	= nested_rip;
		__entry->int_ctl	= int_ctl;
		__entry->event_inj	= event_inj;
		__entry->npt		= npt;
	),

	TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
		  "event_inj: 0x%08x npt: %s",
		__entry->rip, __entry->vmcb, __entry->nested_rip,
		__entry->int_ctl, __entry->event_inj,
		__entry->npt ? "on" : "off")
);

TRACE_EVENT(kvm_nested_intercepts,
	    TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
	    TP_ARGS(cr_read, cr_write, exceptions, intercept),

	TP_STRUCT__entry(
		__field(	__u16,		cr_read		)
		__field(	__u16,		cr_write	)
		__field(	__u32,		exceptions	)
		__field(	__u64,		intercept	)
	),

	TP_fast_assign(
		__entry->cr_read	= cr_read;
		__entry->cr_write	= cr_write;
		__entry->exceptions	= exceptions;
		__entry->intercept	= intercept;
	),

	TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
		__entry->cr_read, __entry->cr_write, __entry->exceptions,
		__entry->intercept)
);

/*
 * Tracepoint for #VMEXIT while nested
 */
TRACE_EVENT(kvm_nested_vmexit,
	    TP_PROTO(__u64 rip, __u32 exit_code,
		     __u64 exit_info1, __u64 exit_info2,
		     __u32 exit_int_info, __u32 exit_int_info_err),
	    TP_ARGS(rip, exit_code, exit_info1, exit_info2,
		    exit_int_info, exit_int_info_err),

	TP_STRUCT__entry(
		__field(	__u64,		rip			)
		__field(	__u32,		exit_code		)
		__field(	__u64,		exit_info1		)
		__field(	__u64,		exit_info2		)
		__field(	__u32,		exit_int_info		)
		__field(	__u32,		exit_int_info_err	)
	),

	TP_fast_assign(
		__entry->rip			= rip;
		__entry->exit_code		= exit_code;
		__entry->exit_info1		= exit_info1;
		__entry->exit_info2		= exit_info2;
		__entry->exit_int_info		= exit_int_info;
		__entry->exit_int_info_err	= exit_int_info_err;
	),

	TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx "
		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
		  __entry->rip,
		  ftrace_print_symbols_seq(p, __entry->exit_code,
					   kvm_x86_ops->exit_reasons_str),
		  __entry->exit_info1, __entry->exit_info2,
		  __entry->exit_int_info, __entry->exit_int_info_err)
);

/*
 * Tracepoint for #VMEXIT reinjected to the guest
 */
TRACE_EVENT(kvm_nested_vmexit_inject,
	    TP_PROTO(__u32 exit_code,
		     __u64 exit_info1, __u64 exit_info2,
		     __u32 exit_int_info, __u32 exit_int_info_err),
	    TP_ARGS(exit_code, exit_info1, exit_info2,
		    exit_int_info, exit_int_info_err),

	TP_STRUCT__entry(
		__field(	__u32,		exit_code		)
		__field(	__u64,		exit_info1		)
		__field(	__u64,		exit_info2		)
		__field(	__u32,		exit_int_info		)
		__field(	__u32,		exit_int_info_err	)
	),

	TP_fast_assign(
		__entry->exit_code		= exit_code;
		__entry->exit_info1		= exit_info1;
		__entry->exit_info2		= exit_info2;
		__entry->exit_int_info		= exit_int_info;
		__entry->exit_int_info_err	= exit_int_info_err;
	),

	TP_printk("reason: %s ext_inf1: 0x%016llx "
		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
		  ftrace_print_symbols_seq(p, __entry->exit_code,
					   kvm_x86_ops->exit_reasons_str),
		__entry->exit_info1, __entry->exit_info2,
		__entry->exit_int_info, __entry->exit_int_info_err)
);

/*
 * Tracepoint for nested #vmexit because of interrupt pending
 */
TRACE_EVENT(kvm_nested_intr_vmexit,
	    TP_PROTO(__u64 rip),
	    TP_ARGS(rip),

	TP_STRUCT__entry(
		__field(	__u64,	rip	)
	),

	TP_fast_assign(
		__entry->rip	=	rip;
	),

	TP_printk("rip: 0x%016llx", __entry->rip)
);

/*
 * Tracepoint for the INVLPGA instruction
 */
TRACE_EVENT(kvm_invlpga,
	    TP_PROTO(__u64 rip, int asid, u64 address),
	    TP_ARGS(rip, asid, address),

	TP_STRUCT__entry(
		__field(	__u64,	rip	)
		__field(	int,	asid	)
		__field(	__u64,	address	)
	),

	TP_fast_assign(
		__entry->rip		=	rip;
		__entry->asid		=	asid;
		__entry->address	=	address;
	),

	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
		  __entry->rip, __entry->asid, __entry->address)
);

/*
 * Tracepoint for the SKINIT instruction
 */
TRACE_EVENT(kvm_skinit,
	    TP_PROTO(__u64 rip, __u32 slb),
	    TP_ARGS(rip, slb),

	TP_STRUCT__entry(
		__field(	__u64,	rip	)
		__field(	__u32,	slb	)
	),

	TP_fast_assign(
		__entry->rip		=	rip;
		__entry->slb		=	slb;
	),

	TP_printk("rip: 0x%016llx slb: 0x%08x",
		  __entry->rip, __entry->slb)
);

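/*
 * Helper sketch: __print_insn() below remembers the current end of the
 * trace_seq buffer, appends each instruction byte as a " %02x" hex pair,
 * writes a terminating NUL in place, and evaluates to the remembered
 * pointer, so TP_printk can print the bytes through a plain %s. For a
 * 3-byte instruction it behaves roughly like:
 *
 *	const char *ret = p->buffer + p->len;
 *	trace_seq_printf(p, " %02x", insn[0]);	// ... insn[1], insn[2]
 *	trace_seq_printf(p, "%c", 0);		// NUL-terminate in place
 *	// the ({ ... }) expression yields ret
 */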
#define __print_insn(insn, ilen) ({		                 \
	int i;							 \
	const char *ret = p->buffer + p->len;			 \
								 \
	for (i = 0; i < ilen; ++i)				 \
		trace_seq_printf(p, " %02x", insn[i]);		 \
	trace_seq_printf(p, "%c", 0);				 \
	ret;							 \
	})

#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
#define KVM_EMUL_INSN_F_CS_L   (1 << 3)

#define kvm_trace_symbol_emul_flags	                  \
	{ 0,   			    "real" },		  \
	{ KVM_EMUL_INSN_F_CR0_PE			  \
	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" },		  \
	{ KVM_EMUL_INSN_F_CR0_PE,   "prot16" },		  \
	{ KVM_EMUL_INSN_F_CR0_PE			  \
	  | KVM_EMUL_INSN_F_CS_D,   "prot32" },		  \
	{ KVM_EMUL_INSN_F_CR0_PE			  \
	  | KVM_EMUL_INSN_F_CS_L,   "prot64" }

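/*
 * kei_decode_mode() below collapses the emulator mode into the flag bits
 * above so that __print_symbolic() can map them back to a short name via
 * kvm_trace_symbol_emul_flags. For example, under these definitions:
 *
 *	kei_decode_mode(X86EMUL_MODE_PROT64)
 *		== KVM_EMUL_INSN_F_CR0_PE | KVM_EMUL_INSN_F_CS_L
 *
 * which TP_printk renders as "prot64".
 */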
#define kei_decode_mode(mode) ({			\
	u8 flags = 0xff;				\
	switch (mode) {					\
	case X86EMUL_MODE_REAL:				\
		flags = 0;				\
		break;					\
	case X86EMUL_MODE_VM86:				\
		flags = KVM_EMUL_INSN_F_EFL_VM;		\
		break;					\
	case X86EMUL_MODE_PROT16:			\
		flags = KVM_EMUL_INSN_F_CR0_PE;		\
		break;					\
	case X86EMUL_MODE_PROT32:			\
		flags = KVM_EMUL_INSN_F_CR0_PE		\
			| KVM_EMUL_INSN_F_CS_D;		\
		break;					\
	case X86EMUL_MODE_PROT64:			\
		flags = KVM_EMUL_INSN_F_CR0_PE		\
			| KVM_EMUL_INSN_F_CS_L;		\
		break;					\
	}						\
	flags;						\
	})

TRACE_EVENT(kvm_emulate_insn,
	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
	TP_ARGS(vcpu, failed),

	TP_STRUCT__entry(
		__field(    __u64, rip                       )
		__field(    __u32, csbase                    )
		__field(    __u8,  len                       )
		__array(    __u8,  insn,    15	             )
		__field(    __u8,  flags                     )
		__field(    __u8,  failed                    )
		),

	TP_fast_assign(
		__entry->rip = vcpu->arch.emulate_ctxt.decode.fetch.start;
		__entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
		__entry->len = vcpu->arch.emulate_ctxt.decode.eip
			       - vcpu->arch.emulate_ctxt.decode.fetch.start;
		memcpy(__entry->insn,
		       vcpu->arch.emulate_ctxt.decode.fetch.data,
		       15);
		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
		__entry->failed = failed;
		),

	TP_printk("%x:%llx:%s (%s)%s",
		  __entry->csbase, __entry->rip,
		  __print_insn(__entry->insn, __entry->len),
		  __print_symbolic(__entry->flags,
				   kvm_trace_symbol_emul_flags),
		  __entry->failed ? " failed" : ""
		)
	);

#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)

#endif /* _TRACE_KVM_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

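/*
 * Build note: the TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE pair above tells
 * define_trace.h where to re-read this header. Exactly one compilation
 * unit (typically arch/x86/kvm/x86.c for KVM) instantiates the
 * tracepoints by doing:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include "trace.h"
 *
 * Every other includer only sees the declarations.
 */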
/* This part must be outside protection */
#include <trace/define_trace.h>