#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVMMMU_H

#include <linux/tracepoint.h>
#include <linux/trace_events.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu

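/* Fields common to the tracepoints below that log a struct kvm_mmu_page. */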
#define KVM_MMU_PAGE_FIELDS			\
	__field(__u8, mmu_valid_gen)		\
	__field(__u64, gfn)			\
	__field(__u32, role)			\
	__field(__u32, root_count)		\
	__field(bool, unsync)

#define KVM_MMU_PAGE_ASSIGN(sp)				\
	__entry->mmu_valid_gen = sp->mmu_valid_gen;	\
	__entry->gfn = sp->gfn;				\
	__entry->role = sp->role.word;			\
	__entry->root_count = sp->root_count;		\
	__entry->unsync = sp->unsync;

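/*
 * Decode the cached role word for printing; access_str indexes the sp's
 * 3-bit access mask (bit 0 = exec, bit 1 = write, bit 2 = user).
 */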
#define KVM_MMU_PAGE_PRINTK() ({				\
	const char *saved_ptr = trace_seq_buffer_ptr(p);	\
	static const char *access_str[] = {			\
		"---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \
	};							\
	union kvm_mmu_page_role role;				\
								\
	role.word = __entry->role;				\
								\
	trace_seq_printf(p, "sp gen %u gfn %llx l%u %u-byte q%u%s %s%s" \
			 " %snxe %sad root %u %s%c",		\
			 __entry->mmu_valid_gen,		\
			 __entry->gfn, role.level,		\
			 role.gpte_is_8_bytes ? 8 : 4,		\
			 role.quadrant,				\
			 role.direct ? " direct" : "",		\
			 access_str[role.access],		\
			 role.invalid ? " invalid" : "",	\
			 role.efer_nx ? "" : "!",		\
			 role.ad_disabled ? "!" : "",		\
			 __entry->root_count,			\
			 __entry->unsync ? "unsync" : "sync", 0); \
	saved_ptr;						\
		})

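/* Page fault error code flags, printed via __print_flags(). */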
#define kvm_mmu_trace_pferr_flags	\
	{ PFERR_PRESENT_MASK, "P" },	\
	{ PFERR_WRITE_MASK, "W" },	\
	{ PFERR_USER_MASK, "U" },	\
	{ PFERR_RSVD_MASK, "RSVD" },	\
	{ PFERR_FETCH_MASK, "F" }

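/* Export the RET_PF_* values so trace tools can decode them symbolically. */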
TRACE_DEFINE_ENUM(RET_PF_RETRY);
TRACE_DEFINE_ENUM(RET_PF_EMULATE);
TRACE_DEFINE_ENUM(RET_PF_INVALID);
TRACE_DEFINE_ENUM(RET_PF_FIXED);
TRACE_DEFINE_ENUM(RET_PF_SPURIOUS);

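/*
 * A pagetable walk has started.
 */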
TRACE_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, u32 pferr),
	TP_ARGS(addr, pferr),

	TP_STRUCT__entry(
		__field(__u64, addr)
		__field(__u32, pferr)
	),

	TP_fast_assign(
		__entry->addr = addr;
		__entry->pferr = pferr;
	),

	TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);

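/* We just walked through a paging element. */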
TRACE_EVENT(
	kvm_mmu_paging_element,
	TP_PROTO(u64 pte, int level),
	TP_ARGS(pte, level),

	TP_STRUCT__entry(
		__field(__u64, pte)
		__field(__u32, level)
	),

	TP_fast_assign(
		__entry->pte = pte;
		__entry->level = level;
	),

	TP_printk("pte %llx level %u", __entry->pte, __entry->level)
);

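/* Common class for the accessed/dirty bit events below. */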
DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size),

	TP_STRUCT__entry(
		__field(__u64, gpa)
	),

	TP_fast_assign(
		__entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
				+ index * size;
	),

	TP_printk("gpa %llx", __entry->gpa)
);

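/* We set a pte accessed bit. */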
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
);

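/* We set a pte dirty bit. */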
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
);

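/* A guest pagetable walk failed; pferr holds the resulting error code. */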
TRACE_EVENT(
	kvm_mmu_walker_error,
	TP_PROTO(u32 pferr),
	TP_ARGS(pferr),

	TP_STRUCT__entry(
		__field(__u32, pferr)
	),

	TP_fast_assign(
		__entry->pferr = pferr;
	),

	TP_printk("pferr %x %s", __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);

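/* Got a shadow page; @created says whether it was newly allocated. */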
TRACE_EVENT(
	kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
		__field(bool, created)
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
		__entry->created = created;
	),

	TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
		  __entry->created ? "new" : "existing")
);

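/* Common class for tracepoints that log only a shadow page. */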
DECLARE_EVENT_CLASS(kvm_mmu_page_class,

	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
	),

	TP_printk("%s", KVM_MMU_PAGE_PRINTK())
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_sync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
);

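/* Installed an MMIO spte caching the gfn, access bits and generation. */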
TRACE_EVENT(
	mark_mmio_spte,
	TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
	TP_ARGS(sptep, gfn, spte),

	TP_STRUCT__entry(
		__field(void *, sptep)
		__field(gfn_t, gfn)
		__field(unsigned, access)
		__field(unsigned int, gen)
	),

	TP_fast_assign(
		__entry->sptep = sptep;
		__entry->gfn = gfn;
		__entry->access = spte & ACC_ALL;
		__entry->gen = get_mmio_spte_generation(spte);
	),

	TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
		  __entry->gfn, __entry->access, __entry->gen)
);

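/* Handled a page fault that hit a cached MMIO spte. */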
TRACE_EVENT(
	handle_mmio_page_fault,
	TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
	TP_ARGS(addr, gfn, access),

	TP_STRUCT__entry(
		__field(u64, addr)
		__field(gfn_t, gfn)
		__field(unsigned, access)
	),

	TP_fast_assign(
		__entry->addr = addr;
		__entry->gfn = gfn;
		__entry->access = access;
	),

	TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
		  __entry->access)
);

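/* An attempt to fix a spte on the lockless fast page fault path. */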
TRACE_EVENT(
	fast_page_fault,
	TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code,
		 u64 *sptep, u64 old_spte, int ret),
	TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, ret),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(gpa_t, cr2_or_gpa)
		__field(u32, error_code)
		__field(u64 *, sptep)
		__field(u64, old_spte)
		__field(u64, new_spte)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu->vcpu_id;
		__entry->cr2_or_gpa = cr2_or_gpa;
		__entry->error_code = error_code;
		__entry->sptep = sptep;
		__entry->old_spte = old_spte;
		__entry->new_spte = *sptep;
		__entry->ret = ret;
	),

	TP_printk("vcpu %d gva %llx error_code %s sptep %p old %#llx"
		  " new %llx spurious %d fixed %d", __entry->vcpu_id,
		  __entry->cr2_or_gpa, __print_flags(__entry->error_code, "|",
		  kvm_mmu_trace_pferr_flags), __entry->sptep,
		  __entry->old_spte, __entry->new_spte,
		  __entry->ret == RET_PF_SPURIOUS, __entry->ret == RET_PF_FIXED
	)
);

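/* Fast zap: bumping mmu_valid_gen obsoletes all existing shadow pages. */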
TRACE_EVENT(
	kvm_mmu_zap_all_fast,
	TP_PROTO(struct kvm *kvm),
	TP_ARGS(kvm),

	TP_STRUCT__entry(
		__field(__u8, mmu_valid_gen)
		__field(unsigned int, mmu_used_pages)
	),

	TP_fast_assign(
		__entry->mmu_valid_gen = kvm->arch.mmu_valid_gen;
		__entry->mmu_used_pages = kvm->arch.n_used_mmu_pages;
	),

	TP_printk("kvm-mmu-valid-gen %u used_pages %x",
		  __entry->mmu_valid_gen, __entry->mmu_used_pages
	)
);

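/* An MMIO spte is valid only while its generation matches the VM's. */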
TRACE_EVENT(
	check_mmio_spte,
	TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
	TP_ARGS(spte, kvm_gen, spte_gen),

	TP_STRUCT__entry(
		__field(unsigned int, kvm_gen)
		__field(unsigned int, spte_gen)
		__field(u64, spte)
	),

	TP_fast_assign(
		__entry->kvm_gen = kvm_gen;
		__entry->spte_gen = spte_gen;
		__entry->spte = spte;
	),

	TP_printk("spte %llx kvm_gen %x spte-gen %x valid %d", __entry->spte,
		  __entry->kvm_gen, __entry->spte_gen,
		  __entry->kvm_gen == __entry->spte_gen
	)
);

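/* Set a spte mapping @gfn at @level. */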
TRACE_EVENT(
	kvm_mmu_set_spte,
	TP_PROTO(int level, gfn_t gfn, u64 *sptep),
	TP_ARGS(level, gfn, sptep),

	TP_STRUCT__entry(
		__field(u64, gfn)
		__field(u64, spte)
		__field(u64, sptep)
		__field(u8, level)
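		/* These depend on page entry type, so compute them now. */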
		__field(bool, r)
		__field(bool, x)
		__field(signed char, u)
	),

	TP_fast_assign(
		__entry->gfn = gfn;
		__entry->spte = *sptep;
		__entry->sptep = virt_to_phys(sptep);
		__entry->level = level;
		__entry->r = shadow_present_mask || (__entry->spte & PT_PRESENT_MASK);
		__entry->x = is_executable_pte(__entry->spte);
		__entry->u = shadow_user_mask ? !!(__entry->spte & shadow_user_mask) : -1;
	),

	TP_printk("gfn %llx spte %llx (%s%s%s%s) level %d at %llx",
		  __entry->gfn, __entry->spte,
		  __entry->r ? "r" : "-",
		  __entry->spte & PT_WRITABLE_MASK ? "w" : "-",
		  __entry->x ? "x" : "-",
		  __entry->u == -1 ? "" : (__entry->u ? "u" : "-"),
		  __entry->level, __entry->sptep
	)
);

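/* The mapping requested by the page fault handler: gfn -> pfn at @level. */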
TRACE_EVENT(
	kvm_mmu_spte_requested,
	TP_PROTO(gpa_t addr, int level, kvm_pfn_t pfn),
	TP_ARGS(addr, level, pfn),

	TP_STRUCT__entry(
		__field(u64, gfn)
		__field(u64, pfn)
		__field(u8, level)
	),

	TP_fast_assign(
		__entry->gfn = addr >> PAGE_SHIFT;
		__entry->pfn = pfn | (__entry->gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
		__entry->level = level;
	),

	TP_printk("gfn %llx pfn %llx level %d",
		  __entry->gfn, __entry->pfn, __entry->level
	)
);

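/* A TDP MMU spte changed; @as_id is the root's address space id. */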
TRACE_EVENT(
	kvm_tdp_mmu_spte_changed,
	TP_PROTO(int as_id, gfn_t gfn, int level, u64 old_spte, u64 new_spte),
	TP_ARGS(as_id, gfn, level, old_spte, new_spte),

	TP_STRUCT__entry(
		__field(u64, gfn)
		__field(u64, old_spte)
		__field(u64, new_spte)

		__field(u8, level)

		__field(u8, as_id)
	),

	TP_fast_assign(
		__entry->gfn = gfn;
		__entry->old_spte = old_spte;
		__entry->new_spte = new_spte;
		__entry->level = level;
		__entry->as_id = as_id;
	),

	TP_printk("as id %d gfn %llx level %d old_spte %llx new_spte %llx",
		  __entry->as_id, __entry->gfn, __entry->level,
		  __entry->old_spte, __entry->new_spte
	)
);

#endif /* _TRACE_KVMMMU_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH mmu
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

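/* This part must be outside protection */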
#include <trace/define_trace.h>