/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_TRACE_ARM_ARM64_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_ARM_ARM64_KVM_H

#include <asm/kvm_emulate.h>
#include <kvm/arm_arch_timer.h>
#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/*
 * Tracepoints for entry/exit to guest
 */
TRACE_EVENT(kvm_entry,
	TP_PROTO(unsigned long vcpu_pc),
	TP_ARGS(vcpu_pc),

	TP_STRUCT__entry(
		__field( unsigned long, vcpu_pc )
	),

	TP_fast_assign(
		__entry->vcpu_pc = vcpu_pc;
	),

	TP_printk("PC: 0x%016lx", __entry->vcpu_pc)
);

TRACE_EVENT(kvm_exit,
	TP_PROTO(int ret, unsigned int esr_ec, unsigned long vcpu_pc),
	TP_ARGS(ret, esr_ec, vcpu_pc),

	TP_STRUCT__entry(
		__field( int, ret )
		__field( unsigned int, esr_ec )
		__field( unsigned long, vcpu_pc )
	),

	TP_fast_assign(
		__entry->ret = ARM_EXCEPTION_CODE(ret);
		__entry->esr_ec = ARM_EXCEPTION_IS_TRAP(ret) ? esr_ec : 0;
		__entry->vcpu_pc = vcpu_pc;
	),

	TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%016lx",
		  __print_symbolic(__entry->ret, kvm_arm_exception_type),
		  __entry->esr_ec,
		  __print_symbolic(__entry->esr_ec, kvm_arm_exception_class),
		  __entry->vcpu_pc)
);

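/*
 * Tracepoints for guest aborts, interrupt injection and MMIO emulation
 */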
TRACE_EVENT(kvm_guest_fault,
	TP_PROTO(unsigned long vcpu_pc, unsigned long hsr,
		 unsigned long hxfar,
		 unsigned long long ipa),
	TP_ARGS(vcpu_pc, hsr, hxfar, ipa),

	TP_STRUCT__entry(
		__field( unsigned long, vcpu_pc )
		__field( unsigned long, hsr )
		__field( unsigned long, hxfar )
		__field( unsigned long long, ipa )
	),

	TP_fast_assign(
		__entry->vcpu_pc = vcpu_pc;
		__entry->hsr = hsr;
		__entry->hxfar = hxfar;
		__entry->ipa = ipa;
	),

	TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#016lx",
		  __entry->ipa, __entry->hsr,
		  __entry->hxfar, __entry->vcpu_pc)
);

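/* Stage-2 access flag fault taken on an already-mapped page */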
TRACE_EVENT(kvm_access_fault,
	TP_PROTO(unsigned long ipa),
	TP_ARGS(ipa),

	TP_STRUCT__entry(
		__field( unsigned long, ipa )
	),

	TP_fast_assign(
		__entry->ipa = ipa;
	),

	TP_printk("IPA: %lx", __entry->ipa)
);

TRACE_EVENT(kvm_irq_line,
	TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level),
	TP_ARGS(type, vcpu_idx, irq_num, level),

	TP_STRUCT__entry(
		__field( unsigned int, type )
		__field( int, vcpu_idx )
		__field( int, irq_num )
		__field( int, level )
	),

	TP_fast_assign(
		__entry->type = type;
		__entry->vcpu_idx = vcpu_idx;
		__entry->irq_num = irq_num;
		__entry->level = level;
	),

	TP_printk("Inject %s interrupt (%d), vcpu->idx: %d, num: %d, level: %d",
		  (__entry->type == KVM_ARM_IRQ_TYPE_CPU) ? "CPU" :
		  (__entry->type == KVM_ARM_IRQ_TYPE_PPI) ? "VGIC PPI" :
		  (__entry->type == KVM_ARM_IRQ_TYPE_SPI) ? "VGIC SPI" : "UNKNOWN",
		  __entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level)
);

TRACE_EVENT(kvm_mmio_emulate,
	TP_PROTO(unsigned long vcpu_pc, unsigned long instr,
		 unsigned long cpsr),
	TP_ARGS(vcpu_pc, instr, cpsr),

	TP_STRUCT__entry(
		__field( unsigned long, vcpu_pc )
		__field( unsigned long, instr )
		__field( unsigned long, cpsr )
	),

	TP_fast_assign(
		__entry->vcpu_pc = vcpu_pc;
		__entry->instr = instr;
		__entry->cpsr = cpsr;
	),

	TP_printk("Emulate MMIO at: 0x%016lx (instr: %08lx, cpsr: %08lx)",
		  __entry->vcpu_pc, __entry->instr, __entry->cpsr)
);

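/*
 * Data abort without a valid instruction syndrome (ESR_EL2.ISV == 0);
 * the access cannot be decoded and emulated by the kernel on its own.
 */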
TRACE_EVENT(kvm_mmio_nisv,
	TP_PROTO(unsigned long vcpu_pc, unsigned long esr,
		 unsigned long far, unsigned long ipa),
	TP_ARGS(vcpu_pc, esr, far, ipa),

	TP_STRUCT__entry(
		__field( unsigned long, vcpu_pc )
		__field( unsigned long, esr )
		__field( unsigned long, far )
		__field( unsigned long, ipa )
	),

	TP_fast_assign(
		__entry->vcpu_pc = vcpu_pc;
		__entry->esr = esr;
		__entry->far = far;
		__entry->ipa = ipa;
	),

	TP_printk("ipa %#016lx, esr %#016lx, far %#016lx, pc %#016lx",
		  __entry->ipa, __entry->esr,
		  __entry->far, __entry->vcpu_pc)
);

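/*
 * Tracepoints for trapped cache maintenance by set/way and for the guest
 * turning its caches/MMU on or off
 */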
TRACE_EVENT(kvm_set_way_flush,
	TP_PROTO(unsigned long vcpu_pc, bool cache),
	TP_ARGS(vcpu_pc, cache),

	TP_STRUCT__entry(
		__field( unsigned long, vcpu_pc )
		__field( bool, cache )
	),

	TP_fast_assign(
		__entry->vcpu_pc = vcpu_pc;
		__entry->cache = cache;
	),

	TP_printk("S/W flush at 0x%016lx (cache %s)",
		  __entry->vcpu_pc, __entry->cache ? "on" : "off")
);

TRACE_EVENT(kvm_toggle_cache,
	TP_PROTO(unsigned long vcpu_pc, bool was, bool now),
	TP_ARGS(vcpu_pc, was, now),

	TP_STRUCT__entry(
		__field( unsigned long, vcpu_pc )
		__field( bool, was )
		__field( bool, now )
	),

	TP_fast_assign(
		__entry->vcpu_pc = vcpu_pc;
		__entry->was = was;
		__entry->now = now;
	),

	TP_printk("VM op at 0x%016lx (cache was %s, now %s)",
		  __entry->vcpu_pc, __entry->was ? "on" : "off",
		  __entry->now ? "on" : "off")
);

/*
 * Tracepoints for arch_timer
 */
TRACE_EVENT(kvm_timer_update_irq,
	TP_PROTO(unsigned long vcpu_id, __u32 irq, int level),
	TP_ARGS(vcpu_id, irq, level),

	TP_STRUCT__entry(
		__field( unsigned long, vcpu_id )
		__field( __u32, irq )
		__field( int, level )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->irq = irq;
		__entry->level = level;
	),

	TP_printk("VCPU: %ld, IRQ %d, level %d",
		  __entry->vcpu_id, __entry->irq, __entry->level)
);

TRACE_EVENT(kvm_get_timer_map,
	TP_PROTO(unsigned long vcpu_id, struct timer_map *map),
	TP_ARGS(vcpu_id, map),

	TP_STRUCT__entry(
		__field( unsigned long, vcpu_id )
		__field( int, direct_vtimer )
		__field( int, direct_ptimer )
		__field( int, emul_vtimer )
		__field( int, emul_ptimer )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->direct_vtimer = arch_timer_ctx_index(map->direct_vtimer);
		__entry->direct_ptimer =
			(map->direct_ptimer) ? arch_timer_ctx_index(map->direct_ptimer) : -1;
		__entry->emul_vtimer =
			(map->emul_vtimer) ? arch_timer_ctx_index(map->emul_vtimer) : -1;
		__entry->emul_ptimer =
			(map->emul_ptimer) ? arch_timer_ctx_index(map->emul_ptimer) : -1;
	),

	TP_printk("VCPU: %ld, dv: %d, dp: %d, ev: %d, ep: %d",
		  __entry->vcpu_id,
		  __entry->direct_vtimer,
		  __entry->direct_ptimer,
		  __entry->emul_vtimer,
		  __entry->emul_ptimer)
);

TRACE_EVENT(kvm_timer_save_state,
	TP_PROTO(struct arch_timer_context *ctx),
	TP_ARGS(ctx),

	TP_STRUCT__entry(
		__field( unsigned long, ctl )
		__field( unsigned long long, cval )
		__field( int, timer_idx )
	),

	TP_fast_assign(
		__entry->ctl = timer_get_ctl(ctx);
		__entry->cval = timer_get_cval(ctx);
		__entry->timer_idx = arch_timer_ctx_index(ctx);
	),

	TP_printk(" CTL: %#08lx CVAL: %#16llx arch_timer_ctx_index: %d",
		  __entry->ctl,
		  __entry->cval,
		  __entry->timer_idx)
);

TRACE_EVENT(kvm_timer_restore_state,
	TP_PROTO(struct arch_timer_context *ctx),
	TP_ARGS(ctx),

	TP_STRUCT__entry(
		__field( unsigned long, ctl )
		__field( unsigned long long, cval )
		__field( int, timer_idx )
	),

	TP_fast_assign(
		__entry->ctl = timer_get_ctl(ctx);
		__entry->cval = timer_get_cval(ctx);
		__entry->timer_idx = arch_timer_ctx_index(ctx);
	),

	TP_printk("CTL: %#08lx CVAL: %#16llx arch_timer_ctx_index: %d",
		  __entry->ctl,
		  __entry->cval,
		  __entry->timer_idx)
);

TRACE_EVENT(kvm_timer_hrtimer_expire,
	TP_PROTO(struct arch_timer_context *ctx),
	TP_ARGS(ctx),

	TP_STRUCT__entry(
		__field( int, timer_idx )
	),

	TP_fast_assign(
		__entry->timer_idx = arch_timer_ctx_index(ctx);
	),

	TP_printk("arch_timer_ctx_index: %d", __entry->timer_idx)
);

TRACE_EVENT(kvm_timer_emulate,
	TP_PROTO(struct arch_timer_context *ctx, bool should_fire),
	TP_ARGS(ctx, should_fire),

	TP_STRUCT__entry(
		__field( int, timer_idx )
		__field( bool, should_fire )
	),

	TP_fast_assign(
		__entry->timer_idx = arch_timer_ctx_index(ctx);
		__entry->should_fire = should_fire;
	),

	TP_printk("arch_timer_ctx_index: %d (should_fire: %d)",
		  __entry->timer_idx, __entry->should_fire)
);

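/*
 * Tracepoints for nested virtualization support
 */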
TRACE_EVENT(kvm_nested_eret,
	TP_PROTO(struct kvm_vcpu *vcpu, unsigned long elr_el2,
		 unsigned long spsr_el2),
	TP_ARGS(vcpu, elr_el2, spsr_el2),

	TP_STRUCT__entry(
		__field(struct kvm_vcpu *, vcpu)
		__field(unsigned long, elr_el2)
		__field(unsigned long, spsr_el2)
		__field(unsigned long, target_mode)
		__field(unsigned long, hcr_el2)
	),

	TP_fast_assign(
		__entry->vcpu = vcpu;
		__entry->elr_el2 = elr_el2;
		__entry->spsr_el2 = spsr_el2;
		__entry->target_mode = spsr_el2 & (PSR_MODE_MASK | PSR_MODE32_BIT);
		__entry->hcr_el2 = __vcpu_sys_reg(vcpu, HCR_EL2);
	),

	TP_printk("elr_el2: 0x%lx spsr_el2: 0x%08lx (M: %s) hcr_el2: %lx",
		  __entry->elr_el2, __entry->spsr_el2,
		  __print_symbolic(__entry->target_mode, kvm_mode_names),
		  __entry->hcr_el2)
);

TRACE_EVENT(kvm_inject_nested_exception,
	TP_PROTO(struct kvm_vcpu *vcpu, u64 esr_el2, int type),
	TP_ARGS(vcpu, esr_el2, type),

	TP_STRUCT__entry(
		__field(struct kvm_vcpu *, vcpu)
		__field(unsigned long, esr_el2)
		__field(int, type)
		__field(unsigned long, spsr_el2)
		__field(unsigned long, pc)
		__field(unsigned long, source_mode)
		__field(unsigned long, hcr_el2)
	),

	TP_fast_assign(
		__entry->vcpu = vcpu;
		__entry->esr_el2 = esr_el2;
		__entry->type = type;
		__entry->spsr_el2 = *vcpu_cpsr(vcpu);
		__entry->pc = *vcpu_pc(vcpu);
		__entry->source_mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);
		__entry->hcr_el2 = __vcpu_sys_reg(vcpu, HCR_EL2);
	),

	TP_printk("%s: esr_el2 0x%lx elr_el2: 0x%lx spsr_el2: 0x%08lx (M: %s) hcr_el2: %lx",
		  __print_symbolic(__entry->type, kvm_exception_type_names),
		  __entry->esr_el2, __entry->pc, __entry->spsr_el2,
		  __print_symbolic(__entry->source_mode, kvm_mode_names),
		  __entry->hcr_el2)
);

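/* System register trap forwarded to the guest hypervisor at virtual EL2 */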
TRACE_EVENT(kvm_forward_sysreg_trap,
	TP_PROTO(struct kvm_vcpu *vcpu, u32 sysreg, bool is_read),
	TP_ARGS(vcpu, sysreg, is_read),

	TP_STRUCT__entry(
		__field(u64, pc)
		__field(u32, sysreg)
		__field(bool, is_read)
	),

	TP_fast_assign(
		__entry->pc = *vcpu_pc(vcpu);
		__entry->sysreg = sysreg;
		__entry->is_read = is_read;
	),

	TP_printk("%llx %c (%d,%d,%d,%d,%d)",
		  __entry->pc,
		  __entry->is_read ? 'R' : 'W',
		  sys_reg_Op0(__entry->sysreg),
		  sys_reg_Op1(__entry->sysreg),
		  sys_reg_CRn(__entry->sysreg),
		  sys_reg_CRm(__entry->sysreg),
		  sys_reg_Op2(__entry->sysreg))
);

#endif /* _TRACE_ARM_ARM64_KVM_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace_arm

/* This part must be outside protection */
#include <trace/define_trace.h>
