/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVMMMU_H

#include <linux/tracepoint.h>
#include <linux/trace_events.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu

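/*
 * Tracepoints for the shadow and TDP MMU.  Once built in, they appear
 * under events/kvmmmu/ in tracefs and can be enabled with, e.g.
 * (assuming tracefs is mounted at the usual path):
 *
 *	echo 1 > /sys/kernel/tracing/events/kvmmmu/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */

/*
 * Field, assignment and print helpers shared by every tracepoint that
 * logs a struct kvm_mmu_page; the role field is the packed
 * kvm_mmu_page_role word, decoded by KVM_MMU_PAGE_PRINTK().
 */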
#define KVM_MMU_PAGE_FIELDS			\
	__field(__u8, mmu_valid_gen)		\
	__field(__u64, gfn)			\
	__field(__u32, role)			\
	__field(__u32, root_count)		\
	__field(bool, unsync)

#define KVM_MMU_PAGE_ASSIGN(sp)				\
	__entry->mmu_valid_gen = sp->mmu_valid_gen;	\
	__entry->gfn = sp->gfn;				\
	__entry->role = sp->role.word;			\
	__entry->root_count = sp->root_count;		\
	__entry->unsync = sp->unsync;

#define KVM_MMU_PAGE_PRINTK() ({					\
	const char *saved_ptr = trace_seq_buffer_ptr(p);		\
	static const char *access_str[] = {				\
		"---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"	\
	};								\
	union kvm_mmu_page_role role;					\
									\
	role.word = __entry->role;					\
									\
	trace_seq_printf(p, "sp gen %u gfn %llx l%u %u-byte q%u%s %s%s"	\
			 " %snxe %sad root %u %s%c",			\
			 __entry->mmu_valid_gen,			\
			 __entry->gfn, role.level,			\
			 role.gpte_is_8_bytes ? 8 : 4,			\
			 role.quadrant,					\
			 role.direct ? " direct" : "",			\
			 access_str[role.access],			\
			 role.invalid ? " invalid" : "",		\
			 role.nxe ? "" : "!",				\
			 role.ad_disabled ? "!" : "",			\
			 __entry->root_count,				\
			 __entry->unsync ? "unsync" : "sync", 0);	\
	saved_ptr;							\
})

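/*
 * x86 page-fault error-code bits, consumed by __print_flags() in the
 * TP_printk()s below.
 */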
#define kvm_mmu_trace_pferr_flags	\
	{ PFERR_PRESENT_MASK, "P" },	\
	{ PFERR_WRITE_MASK, "W" },	\
	{ PFERR_USER_MASK, "U" },	\
	{ PFERR_RSVD_MASK, "RSVD" },	\
	{ PFERR_FETCH_MASK, "F" }

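/*
 * Tracepoint for the start of a guest page-table walk: logs the address
 * being translated and the page-fault error code that triggered it.
 */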
TRACE_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, u32 pferr),
	TP_ARGS(addr, pferr),

	TP_STRUCT__entry(
		__field(__u64, addr)
		__field(__u32, pferr)
	),

	TP_fast_assign(
		__entry->addr = addr;
		__entry->pferr = pferr;
	),

	TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);

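/*
 * Tracepoint for each guest page-table entry visited during a walk,
 * together with its level.
 */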
TRACE_EVENT(
	kvm_mmu_paging_element,
	TP_PROTO(u64 pte, int level),
	TP_ARGS(pte, level),

	TP_STRUCT__entry(
		__field(__u64, pte)
		__field(__u32, level)
	),

	TP_fast_assign(
		__entry->pte = pte;
		__entry->level = level;
	),

	TP_printk("pte %llx level %u", __entry->pte, __entry->level)
);

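/*
 * Class for tracepoints that fire when the page-table walker sets an
 * accessed or dirty bit in a guest page-table entry; only the
 * guest-physical address of the updated entry is logged.  Instantiated
 * below for the A and D bits.
 */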
DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size),

	TP_STRUCT__entry(
		__field(__u64, gpa)
	),

	TP_fast_assign(
		__entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
				+ index * size;
	),

	TP_printk("gpa %llx", __entry->gpa)
);

DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
);

DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
);

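/*
 * Tracepoint for a failed guest page-table walk: pferr is the resulting
 * page-fault error code.
 */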
TRACE_EVENT(
	kvm_mmu_walker_error,
	TP_PROTO(u32 pferr),
	TP_ARGS(pferr),

	TP_STRUCT__entry(
		__field(__u32, pferr)
	),

	TP_fast_assign(
		__entry->pferr = pferr;
	),

	TP_printk("pferr %x %s", __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);

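/*
 * Tracepoint for getting a shadow page; @created distinguishes a freshly
 * allocated page from a hit on an existing one.
 */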
TRACE_EVENT(
	kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
		__field(bool, created)
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
		__entry->created = created;
	),

	TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
		  __entry->created ? "new" : "existing")
);

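/*
 * Class for tracepoints that log nothing but the shadow page itself,
 * instantiated below for page sync, unsync and zap-preparation.
 */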
DECLARE_EVENT_CLASS(kvm_mmu_page_class,

	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
	),

	TP_printk("%s", KVM_MMU_PAGE_PRINTK())
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_sync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
);

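/*
 * Tracepoint for marking an SPTE as MMIO: logs the SPTE's location, the
 * target gfn, the cached access bits and the MMIO generation.
 */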
TRACE_EVENT(
	mark_mmio_spte,
	TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
	TP_ARGS(sptep, gfn, spte),

	TP_STRUCT__entry(
		__field(void *, sptep)
		__field(gfn_t, gfn)
		__field(unsigned, access)
		__field(unsigned int, gen)
	),

	TP_fast_assign(
		__entry->sptep = sptep;
		__entry->gfn = gfn;
		__entry->access = spte & ACC_ALL;
		__entry->gen = get_mmio_spte_generation(spte);
	),

	TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
		  __entry->gfn, __entry->access, __entry->gen)
);

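/*
 * Tracepoint for handling a page fault as MMIO based on a cached
 * MMIO SPTE.
 */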
TRACE_EVENT(
	handle_mmio_page_fault,
	TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
	TP_ARGS(addr, gfn, access),

	TP_STRUCT__entry(
		__field(u64, addr)
		__field(gfn_t, gfn)
		__field(unsigned, access)
	),

	TP_fast_assign(
		__entry->addr = addr;
		__entry->gfn = gfn;
		__entry->access = access;
	),

	TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
		  __entry->access)
);

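/*
 * Tracepoint for page faults fixed on the fast path, i.e. without taking
 * mmu_lock: logs the old and new SPTE values and whether the fault was
 * spurious or actually fixed.
 */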
TRACE_EVENT(
	fast_page_fault,
	TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code,
		 u64 *sptep, u64 old_spte, int ret),
	TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, ret),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(gpa_t, cr2_or_gpa)
		__field(u32, error_code)
		__field(u64 *, sptep)
		__field(u64, old_spte)
		__field(u64, new_spte)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu->vcpu_id;
		__entry->cr2_or_gpa = cr2_or_gpa;
		__entry->error_code = error_code;
		__entry->sptep = sptep;
		__entry->old_spte = old_spte;
		__entry->new_spte = *sptep;
		__entry->ret = ret;
	),

	TP_printk("vcpu %d gva %llx error_code %s sptep %p old %#llx"
		  " new %llx spurious %d fixed %d", __entry->vcpu_id,
		  __entry->cr2_or_gpa, __print_flags(__entry->error_code, "|",
		  kvm_mmu_trace_pferr_flags), __entry->sptep,
		  __entry->old_spte, __entry->new_spte,
		  __entry->ret == RET_PF_SPURIOUS, __entry->ret == RET_PF_FIXED
	)
);

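/*
 * Tracepoint for the fast zap of all shadow pages: logs the new MMU
 * valid generation and the number of shadow pages still in use.
 */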
TRACE_EVENT(
	kvm_mmu_zap_all_fast,
	TP_PROTO(struct kvm *kvm),
	TP_ARGS(kvm),

	TP_STRUCT__entry(
		__field(__u8, mmu_valid_gen)
		__field(unsigned int, mmu_used_pages)
	),

	TP_fast_assign(
		__entry->mmu_valid_gen = kvm->arch.mmu_valid_gen;
		__entry->mmu_used_pages = kvm->arch.n_used_mmu_pages;
	),

	TP_printk("kvm-mmu-valid-gen %u used_pages %x",
		  __entry->mmu_valid_gen, __entry->mmu_used_pages
	)
);

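/*
 * Tracepoint for checking a cached MMIO SPTE's generation against the
 * VM's current MMIO generation; a mismatch means the SPTE is stale.
 */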
TRACE_EVENT(
	check_mmio_spte,
	TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
	TP_ARGS(spte, kvm_gen, spte_gen),

	TP_STRUCT__entry(
		__field(unsigned int, kvm_gen)
		__field(unsigned int, spte_gen)
		__field(u64, spte)
	),

	TP_fast_assign(
		__entry->kvm_gen = kvm_gen;
		__entry->spte_gen = spte_gen;
		__entry->spte = spte;
	),

	TP_printk("spte %llx kvm_gen %x spte-gen %x valid %d", __entry->spte,
		  __entry->kvm_gen, __entry->spte_gen,
		  __entry->kvm_gen == __entry->spte_gen
	)
);

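/*
 * Tracepoint for installing or updating a leaf SPTE: the r/w/x/u
 * permission summary is decoded from the new SPTE value, and the SPTE's
 * location is logged as a physical address.
 */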
TRACE_EVENT(
	kvm_mmu_set_spte,
	TP_PROTO(int level, gfn_t gfn, u64 *sptep),
	TP_ARGS(level, gfn, sptep),

	TP_STRUCT__entry(
		__field(u64, gfn)
		__field(u64, spte)
		__field(u64, sptep)
		__field(u8, level)
		/* These depend on page entry type, so compute them now.  */
		__field(bool, r)
		__field(bool, x)
		__field(signed char, u)
	),

	TP_fast_assign(
		__entry->gfn = gfn;
		__entry->spte = *sptep;
		__entry->sptep = virt_to_phys(sptep);
		__entry->level = level;
		__entry->r = shadow_present_mask ||
			     (__entry->spte & PT_PRESENT_MASK);
		__entry->x = is_executable_pte(__entry->spte);
		__entry->u = shadow_user_mask ? !!(__entry->spte & shadow_user_mask) : -1;
	),

	TP_printk("gfn %llx spte %llx (%s%s%s%s) level %d at %llx",
		  __entry->gfn, __entry->spte,
		  __entry->r ? "r" : "-",
		  __entry->spte & PT_WRITABLE_MASK ? "w" : "-",
		  __entry->x ? "x" : "-",
		  __entry->u == -1 ? "" : (__entry->u ? "u" : "-"),
		  __entry->level, __entry->sptep
	)
);

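/*
 * Tracepoint for the mapping size requested by the page fault handler:
 * the faulting gfn, the pfn backing it and the requested level.
 */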
TRACE_EVENT(
	kvm_mmu_spte_requested,
	TP_PROTO(gpa_t addr, int level, kvm_pfn_t pfn),
	TP_ARGS(addr, level, pfn),

	TP_STRUCT__entry(
		__field(u64, gfn)
		__field(u64, pfn)
		__field(u8, level)
	),

	TP_fast_assign(
		__entry->gfn = addr >> PAGE_SHIFT;
		__entry->pfn = pfn | (__entry->gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
		__entry->level = level;
	),

	TP_printk("gfn %llx pfn %llx level %d",
		  __entry->gfn, __entry->pfn, __entry->level
	)
);

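/*
 * Tracepoint for a TDP MMU SPTE changing value: logs the address-space
 * id, gfn, level, and the old and new SPTE contents.
 */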
TRACE_EVENT(
	kvm_tdp_mmu_spte_changed,
	TP_PROTO(int as_id, gfn_t gfn, int level, u64 old_spte, u64 new_spte),
	TP_ARGS(as_id, gfn, level, old_spte, new_spte),

	TP_STRUCT__entry(
		__field(u64, gfn)
		__field(u64, old_spte)
		__field(u64, new_spte)
		/* Level cannot be larger than 5 on x86, so it fits in a u8. */
		__field(u8, level)
		/* as_id can only be 0 or 1 x86, so it fits in a u8. */
		__field(u8, as_id)
	),

	TP_fast_assign(
		__entry->gfn = gfn;
		__entry->old_spte = old_spte;
		__entry->new_spte = new_spte;
		__entry->level = level;
		__entry->as_id = as_id;
	),

	TP_printk("as id %d gfn %llx level %d old_spte %llx new_spte %llx",
		  __entry->as_id, __entry->gfn, __entry->level,
		  __entry->old_spte, __entry->new_spte
	)
);

#endif /* _TRACE_KVMMMU_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH mmu
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

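/* This part must be outside protection */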
#include <trace/define_trace.h>