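/*
 * Tracepoints for the KVM x86 MMU, grouped under the "kvmmmu" trace system.
 * Like other tracepoint headers, this file is deliberately allowed to be
 * read more than once (TRACE_HEADER_MULTI_READ); the include of
 * <trace/define_trace.h> at the bottom expands the TRACE_EVENT() macros
 * into the actual tracepoint definitions.
 */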
#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVMMMU_H

#include <linux/tracepoint.h>
#include <linux/ftrace_event.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu

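/*
 * Helpers shared by the events below that log a struct kvm_mmu_page:
 * field declarations, assignment from the shadow page, and pretty-printing
 * of the packed role word.
 */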
#define KVM_MMU_PAGE_FIELDS			\
	__field(unsigned long, mmu_valid_gen)	\
	__field(__u64, gfn)			\
	__field(__u32, role)			\
	__field(__u32, root_count)		\
	__field(bool, unsync)

#define KVM_MMU_PAGE_ASSIGN(sp)				\
	__entry->mmu_valid_gen = sp->mmu_valid_gen;	\
	__entry->gfn = sp->gfn;				\
	__entry->role = sp->role.word;			\
	__entry->root_count = sp->root_count;		\
	__entry->unsync = sp->unsync;

#define KVM_MMU_PAGE_PRINTK() ({				        \
	const u32 saved_len = p->len;					\
	static const char *access_str[] = {			        \
		"---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"  \
	};							        \
	union kvm_mmu_page_role role;				        \
								        \
	role.word = __entry->role;					\
									\
	trace_seq_printf(p, "sp gen %lx gfn %llx %u%s q%u%s %s%s"	\
			 " %snxe root %u %s%c",	__entry->mmu_valid_gen,	\
			 __entry->gfn, role.level,			\
			 role.cr4_pae ? " pae" : "",			\
			 role.quadrant,					\
			 role.direct ? " direct" : "",			\
			 access_str[role.access],			\
			 role.invalid ? " invalid" : "",		\
			 role.nxe ? "" : "!",				\
			 __entry->root_count,				\
			 __entry->unsync ? "unsync" : "sync", 0);	\
	p->buffer + saved_len;						\
		})

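/* Page fault error-code bits, as short labels for __print_flags(). */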
#define kvm_mmu_trace_pferr_flags       \
	{ PFERR_PRESENT_MASK, "P" },	\
	{ PFERR_WRITE_MASK, "W" },	\
	{ PFERR_USER_MASK, "U" },	\
	{ PFERR_RSVD_MASK, "RSVD" },	\
	{ PFERR_FETCH_MASK, "F" }

/*
 * A pagetable walk has started
 */
TRACE_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, u32 pferr),
	TP_ARGS(addr, pferr),

	TP_STRUCT__entry(
		__field(__u64, addr)
		__field(__u32, pferr)
	),

	TP_fast_assign(
		__entry->addr = addr;
		__entry->pferr = pferr;
	),

	TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);


/* We just walked a paging element */
TRACE_EVENT(
	kvm_mmu_paging_element,
	TP_PROTO(u64 pte, int level),
	TP_ARGS(pte, level),

	TP_STRUCT__entry(
		__field(__u64, pte)
		__field(__u32, level)
		),

	TP_fast_assign(
		__entry->pte = pte;
		__entry->level = level;
		),

	TP_printk("pte %llx level %u", __entry->pte, __entry->level)
);

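/*
 * Common class for the accessed/dirty bit events below; the traced value is
 * the guest physical address of the pte being updated.
 */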
DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size),

	TP_STRUCT__entry(
		__field(__u64, gpa)
	),

	TP_fast_assign(
		__entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
				+ index * size;
		),

	TP_printk("gpa %llx", __entry->gpa)
);

/* We set a pte accessed bit */
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
);

/* We set a pte dirty bit */
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
);

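/* A page-table walk ended with an error; pferr is the resulting fault code. */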
TRACE_EVENT(
	kvm_mmu_walker_error,
	TP_PROTO(u32 pferr),
	TP_ARGS(pferr),

	TP_STRUCT__entry(
		__field(__u32, pferr)
		),

	TP_fast_assign(
		__entry->pferr = pferr;
		),

	TP_printk("pferr %x %s", __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);

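/* A shadow page was requested; 'created' says whether it had to be allocated. */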
TRACE_EVENT(
	kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
		__field(bool, created)
		),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
		__entry->created = created;
		),

	TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
		  __entry->created ? "new" : "existing")
);

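/* Common class for events that log nothing but a shadow page (sync/unsync/zap). */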
DECLARE_EVENT_CLASS(kvm_mmu_page_class,

	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
	),

	TP_printk("%s", KVM_MMU_PAGE_PRINTK())
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_sync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
);

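/* An MMIO spte was installed for the given gfn, access bits and generation. */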
TRACE_EVENT(
	mark_mmio_spte,
	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
	TP_ARGS(sptep, gfn, access, gen),

	TP_STRUCT__entry(
		__field(void *, sptep)
		__field(gfn_t, gfn)
		__field(unsigned, access)
		__field(unsigned int, gen)
	),

	TP_fast_assign(
		__entry->sptep = sptep;
		__entry->gfn = gfn;
		__entry->access = access;
		__entry->gen = gen;
	),

	TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
		  __entry->gfn, __entry->access, __entry->gen)
);

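/* A page fault was recognized as MMIO via a cached MMIO spte. */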
TRACE_EVENT(
	handle_mmio_page_fault,
	TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
	TP_ARGS(addr, gfn, access),

	TP_STRUCT__entry(
		__field(u64, addr)
		__field(gfn_t, gfn)
		__field(unsigned, access)
	),

	TP_fast_assign(
		__entry->addr = addr;
		__entry->gfn = gfn;
		__entry->access = access;
	),

	TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
		  __entry->access)
);

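/*
 * Used by fast_page_fault below: with retry set, the fault is reported as
 * "spurious" if the old spte was already writable and as "fixed" if the new
 * spte is writable.
 */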
#define __spte_satisfied(__spte)				\
	(__entry->retry && is_writable_pte(__entry->__spte))

TRACE_EVENT(
	fast_page_fault,
	TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
		 u64 *sptep, u64 old_spte, bool retry),
	TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(gva_t, gva)
		__field(u32, error_code)
		__field(u64 *, sptep)
		__field(u64, old_spte)
		__field(u64, new_spte)
		__field(bool, retry)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu->vcpu_id;
		__entry->gva = gva;
		__entry->error_code = error_code;
		__entry->sptep = sptep;
		__entry->old_spte = old_spte;
		__entry->new_spte = *sptep;
		__entry->retry = retry;
	),

	TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx"
		  " new %llx spurious %d fixed %d", __entry->vcpu_id,
		  __entry->gva, __print_flags(__entry->error_code, "|",
		  kvm_mmu_trace_pferr_flags), __entry->sptep,
		  __entry->old_spte, __entry->new_spte,
		  __spte_satisfied(old_spte), __spte_satisfied(new_spte)
	)
);

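/*
 * All shadow pages are being invalidated; logs the MMU valid generation and
 * the number of shadow pages currently in use.
 */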
TRACE_EVENT(
	kvm_mmu_invalidate_zap_all_pages,
	TP_PROTO(struct kvm *kvm),
	TP_ARGS(kvm),

	TP_STRUCT__entry(
		__field(unsigned long, mmu_valid_gen)
		__field(unsigned int, mmu_used_pages)
	),

	TP_fast_assign(
		__entry->mmu_valid_gen = kvm->arch.mmu_valid_gen;
		__entry->mmu_used_pages = kvm->arch.n_used_mmu_pages;
	),

	TP_printk("kvm-mmu-valid-gen %lx used_pages %x",
		  __entry->mmu_valid_gen, __entry->mmu_used_pages
	)
);


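/* An MMIO spte's generation is compared against the current kvm generation. */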
TRACE_EVENT(
	check_mmio_spte,
	TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
	TP_ARGS(spte, kvm_gen, spte_gen),

	TP_STRUCT__entry(
		__field(unsigned int, kvm_gen)
		__field(unsigned int, spte_gen)
		__field(u64, spte)
	),

	TP_fast_assign(
		__entry->kvm_gen = kvm_gen;
		__entry->spte_gen = spte_gen;
		__entry->spte = spte;
	),

	TP_printk("spte %llx kvm_gen %x spte-gen %x valid %d", __entry->spte,
		  __entry->kvm_gen, __entry->spte_gen,
		  __entry->kvm_gen == __entry->spte_gen
	)
);
#endif /* _TRACE_KVMMMU_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

/* This part must be outside protection */
#include <trace/define_trace.h>