/* SPDX-License-Identifier: GPL-2.0 */

#if !defined(_TRACE_KVM_PR_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_PR_H

#include <linux/tracepoint.h>
#include "trace_book3s.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_pr

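/*
 * Tracepoints for Book3S PR KVM ("PR" because the guest runs in
 * problem state).  TRACE_SYSTEM is kvm_pr, so these events appear
 * under events/kvm_pr/ in tracefs, e.g.:
 *
 *	echo 1 > /sys/kernel/tracing/events/kvm_pr/enable
 *	cat /sys/kernel/tracing/trace_pipe
 */

/*
 * Logged when the exit handler decides to re-enter the guest: r is the
 * handler's return code and pc the guest program counter being resumed.
 */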
TRACE_EVENT(kvm_book3s_reenter,
	TP_PROTO(int r, struct kvm_vcpu *vcpu),
	TP_ARGS(r, vcpu),

	TP_STRUCT__entry(
		__field(	unsigned int,	r		)
		__field(	unsigned long,	pc		)
	),

	TP_fast_assign(
		__entry->r	= r;
		__entry->pc	= kvmppc_get_pc(vcpu);
	),

	TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc)
);

#ifdef CONFIG_PPC_BOOK3S_64

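/*
 * Emitted when PR KVM maps a guest page into the host hashed page
 * table on 64-bit Book3S.  flag_w/flag_x decode the HPTE permission
 * bits: 'w' unless the PP bits (== 3) make the page read-only, and
 * 'x' unless HPTE_R_N marks it no-execute.
 */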
TRACE_EVENT(kvm_book3s_64_mmu_map,
	TP_PROTO(int rflags, ulong hpteg, ulong va, kvm_pfn_t hpaddr,
		 struct kvmppc_pte *orig_pte),
	TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),

	TP_STRUCT__entry(
		__field(	unsigned char,		flag_w		)
		__field(	unsigned char,		flag_x		)
		__field(	unsigned long,		eaddr		)
		__field(	unsigned long,		hpteg		)
		__field(	unsigned long,		va		)
		__field(	unsigned long long,	vpage		)
		__field(	unsigned long,		hpaddr		)
	),

	TP_fast_assign(
		__entry->flag_w	= ((rflags & HPTE_R_PP) == 3) ? '-' : 'w';
		__entry->flag_x	= (rflags & HPTE_R_N) ? '-' : 'x';
		__entry->eaddr	= orig_pte->eaddr;
		__entry->hpteg	= hpteg;
		__entry->va	= va;
		__entry->vpage	= orig_pte->vpage;
		__entry->hpaddr	= hpaddr;
	),

	TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx",
		  __entry->flag_w, __entry->flag_x, __entry->eaddr,
		  __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr)
);

#endif /* CONFIG_PPC_BOOK3S_64 */

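/*
 * A shadow PTE (struct hpte_cache) is added to the vcpu's MMU cache.
 * flags packs the permissions as a bitmask: 0x4 = may_read,
 * 0x2 = may_write, 0x1 = may_execute.
 */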
TRACE_EVENT(kvm_book3s_mmu_map,
	TP_PROTO(struct hpte_cache *pte),
	TP_ARGS(pte),

	TP_STRUCT__entry(
		__field(	u64,		host_vpn	)
		__field(	u64,		pfn		)
		__field(	ulong,		eaddr		)
		__field(	u64,		vpage		)
		__field(	ulong,		raddr		)
		__field(	int,		flags		)
	),

	TP_fast_assign(
		__entry->host_vpn	= pte->host_vpn;
		__entry->pfn		= pte->pfn;
		__entry->eaddr		= pte->pte.eaddr;
		__entry->vpage		= pte->pte.vpage;
		__entry->raddr		= pte->pte.raddr;
		__entry->flags		= (pte->pte.may_read ? 0x4 : 0) |
					  (pte->pte.may_write ? 0x2 : 0) |
					  (pte->pte.may_execute ? 0x1 : 0);
	),

	TP_printk("Map: hvpn=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
		  __entry->vpage, __entry->raddr, __entry->flags)
);

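/*
 * Counterpart of kvm_book3s_mmu_map: a cached shadow PTE is being
 * invalidated.  Same field layout and flag encoding as the map event;
 * note that the "hva=" in the message actually prints host_vpn.
 */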
TRACE_EVENT(kvm_book3s_mmu_invalidate,
	TP_PROTO(struct hpte_cache *pte),
	TP_ARGS(pte),

	TP_STRUCT__entry(
		__field(	u64,		host_vpn	)
		__field(	u64,		pfn		)
		__field(	ulong,		eaddr		)
		__field(	u64,		vpage		)
		__field(	ulong,		raddr		)
		__field(	int,		flags		)
	),

	TP_fast_assign(
		__entry->host_vpn	= pte->host_vpn;
		__entry->pfn		= pte->pfn;
		__entry->eaddr		= pte->pte.eaddr;
		__entry->vpage		= pte->pte.vpage;
		__entry->raddr		= pte->pte.raddr;
		__entry->flags		= (pte->pte.may_read ? 0x4 : 0) |
					  (pte->pte.may_write ? 0x2 : 0) |
					  (pte->pte.may_execute ? 0x1 : 0);
	),

	TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]",
		  __entry->host_vpn, __entry->pfn, __entry->eaddr,
		  __entry->vpage, __entry->raddr, __entry->flags)
);

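/*
 * Bulk shadow-PTE flush.  count is the vcpu's current hpte_cache_count,
 * type is a short string printed as a prefix of "PTEs" (what kind of
 * flush this is), and p1/p2 bound the flushed range; their exact
 * meaning depends on the call site.
 */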
TRACE_EVENT(kvm_book3s_mmu_flush,
	TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1,
		 unsigned long long p2),
	TP_ARGS(type, vcpu, p1, p2),

	TP_STRUCT__entry(
		__field(	int,			count		)
		__field(	unsigned long long,	p1		)
		__field(	unsigned long long,	p2		)
		__field(	const char *,		type		)
	),

	TP_fast_assign(
		__entry->count	= to_book3s(vcpu)->hpte_cache_count;
		__entry->p1	= p1;
		__entry->p2	= p2;
		__entry->type	= type;
	),

	TP_printk("Flush %d %sPTEs: %llx - %llx",
		  __entry->count, __entry->type, __entry->p1, __entry->p2)
);

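/*
 * A guest VSID -> host VSID lookup hit in the SID map.
 */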
TRACE_EVENT(kvm_book3s_slb_found,
	TP_PROTO(unsigned long long gvsid, unsigned long long hvsid),
	TP_ARGS(gvsid, hvsid),

	TP_STRUCT__entry(
		__field(	unsigned long long,	gvsid		)
		__field(	unsigned long long,	hvsid		)
	),

	TP_fast_assign(
		__entry->gvsid	= gvsid;
		__entry->hvsid	= hvsid;
	),

	TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid)
);

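/*
 * A guest VSID lookup missed: sid_map_mask is the SID-map slot that was
 * probed; the message also shows SID_MAP_MASK - sid_map_mask, the
 * alternate slot checked by the lookup.
 */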
TRACE_EVENT(kvm_book3s_slb_fail,
	TP_PROTO(u16 sid_map_mask, unsigned long long gvsid),
	TP_ARGS(sid_map_mask, gvsid),

	TP_STRUCT__entry(
		__field(	unsigned short,		sid_map_mask	)
		__field(	unsigned long long,	gvsid		)
	),

	TP_fast_assign(
		__entry->sid_map_mask	= sid_map_mask;
		__entry->gvsid		= gvsid;
	),

	TP_printk("%x/%x: %llx", __entry->sid_map_mask,
		  SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid)
);

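/*
 * A new guest VSID -> host VSID mapping is installed in SID-map slot
 * sid_map_mask.
 */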
TRACE_EVENT(kvm_book3s_slb_map,
	TP_PROTO(u16 sid_map_mask, unsigned long long gvsid,
		 unsigned long long hvsid),
	TP_ARGS(sid_map_mask, gvsid, hvsid),

	TP_STRUCT__entry(
		__field(	unsigned short,		sid_map_mask	)
		__field(	unsigned long long,	guest_vsid	)
		__field(	unsigned long long,	host_vsid	)
	),

	TP_fast_assign(
		__entry->sid_map_mask	= sid_map_mask;
		__entry->guest_vsid	= gvsid;
		__entry->host_vsid	= hvsid;
	),

	TP_printk("%x: %llx -> %llx", __entry->sid_map_mask,
		  __entry->guest_vsid, __entry->host_vsid)
);

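/*
 * Records the VSID/ESID pair of an SLB entry written on behalf of the
 * guest (slbmte is the PowerPC "SLB move to entry" instruction).
 */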
TRACE_EVENT(kvm_book3s_slbmte,
	TP_PROTO(u64 slb_vsid, u64 slb_esid),
	TP_ARGS(slb_vsid, slb_esid),

	TP_STRUCT__entry(
		__field(	u64,	slb_vsid	)
		__field(	u64,	slb_esid	)
	),

	TP_fast_assign(
		__entry->slb_vsid	= slb_vsid;
		__entry->slb_esid	= slb_esid;
	),

	TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid)
);

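/*
 * Guest exit.  exit_nr is decoded via kvm_trace_symbol_exit (from
 * trace_book3s.h); the other fields snapshot the guest PC, MSR, fault
 * DAR, shadow SRR1 and last instruction word at the time of the exit.
 */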
TRACE_EVENT(kvm_exit,
	TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu),
	TP_ARGS(exit_nr, vcpu),

	TP_STRUCT__entry(
		__field(	unsigned int,	exit_nr		)
		__field(	unsigned long,	pc		)
		__field(	unsigned long,	msr		)
		__field(	unsigned long,	dar		)
		__field(	unsigned long,	srr1		)
		__field(	unsigned long,	last_inst	)
	),

	TP_fast_assign(
		__entry->exit_nr	= exit_nr;
		__entry->pc		= kvmppc_get_pc(vcpu);
		__entry->dar		= kvmppc_get_fault_dar(vcpu);
		__entry->msr		= kvmppc_get_msr(vcpu);
		__entry->srr1		= vcpu->arch.shadow_srr1;
		__entry->last_inst	= vcpu->arch.last_inst;
	),

	TP_printk("exit=%s"
		" | pc=0x%lx"
		" | msr=0x%lx"
		" | dar=0x%lx"
		" | srr1=0x%lx"
		" | last_inst=0x%lx",
		__print_symbolic(__entry->exit_nr, kvm_trace_symbol_exit),
		__entry->pc,
		__entry->msr,
		__entry->dar,
		__entry->srr1,
		__entry->last_inst
		)
);

#endif /* _TRACE_KVM_PR_H */

/* This part must be outside protection */

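/*
 * TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE make define_trace.h re-include
 * this header (./trace_pr.h) with TRACE_HEADER_MULTI_READ defined,
 * which expands the TRACE_EVENT() definitions above into the real
 * tracepoint code.
 */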
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE

#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_pr

#include <trace/define_trace.h>
