// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <clocksource/timer-riscv.h>
#include <asm/csr.h>
#include <asm/delay.h>
#include <asm/kvm_vcpu_timer.h>

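/*
 * Current guest-visible time: the host cycle counter plus the
 * per-VM delta that offsets guest time from host time.
 */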
static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt)
{
	return get_cycles64() + gt->time_delta;
}

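/*
 * Convert the distance from now to an absolute cycle value into
 * nanoseconds, using the clocksource mult/shift pair cached in the
 * guest timer. Returns 0 if the target cycle is already in the past.
 */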
static u64 kvm_riscv_delta_cycles2ns(u64 cycles,
				     struct kvm_guest_timer *gt,
				     struct kvm_vcpu_timer *t)
{
	unsigned long flags;
	u64 cycles_now, cycles_delta, delta_ns;

	local_irq_save(flags);
	cycles_now = kvm_riscv_current_cycles(gt);
	if (cycles_now < cycles)
		cycles_delta = cycles - cycles_now;
	else
		cycles_delta = 0;
	delta_ns = (cycles_delta * gt->nsec_mult) >> gt->nsec_shift;
	local_irq_restore(flags);

	return delta_ns;
}

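/*
 * hrtimer callback for the non-Sstc path: if the timer fired early
 * relative to the guest's compare value, re-arm it for the remaining
 * delta; otherwise inject the VS-level timer interrupt.
 */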
static enum hrtimer_restart kvm_riscv_vcpu_hrtimer_expired(struct hrtimer *h)
{
	u64 delta_ns;
	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
		return HRTIMER_RESTART;
	}

	t->next_set = false;
	kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_TIMER);

	return HRTIMER_NORESTART;
}

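/*
 * Cancel a pending timer event. Fails with -EINVAL if the timer was
 * never initialized or no event is currently armed.
 */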
static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
{
	if (!t->init_done || !t->next_set)
		return -EINVAL;

	hrtimer_cancel(&t->hrt);
	t->next_set = false;

	return 0;
}

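/*
 * Sstc path: program the guest's next event directly into the
 * vstimecmp CSR (split across VSTIMECMP/VSTIMECMPH on 32-bit), so
 * hardware raises the VS timer interrupt without host involvement.
 */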
static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles)
{
#if defined(CONFIG_32BIT)
	csr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF);
	csr_write(CSR_VSTIMECMPH, ncycles >> 32);
#else
	csr_write(CSR_VSTIMECMP, ncycles);
#endif
	return 0;
}

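/*
 * Fallback path without Sstc: emulate the guest timer with a host
 * hrtimer. Clear any pending VS timer interrupt, then arm the hrtimer
 * for the nanosecond delta to the requested cycle value.
 */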
static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 delta_ns;

	if (!t->init_done)
		return -EINVAL;

	kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_TIMER);

	delta_ns = kvm_riscv_delta_cycles2ns(ncycles, gt, t);
	t->next_cycles = ncycles;
	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
	t->next_set = true;

	return 0;
}

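/*
 * Program the next guest timer event via whichever backend
 * (vstimecmp or hrtimer emulation) was selected at timer init.
 */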
int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	return t->timer_next_event(vcpu, ncycles);
}

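/*
 * hrtimer callback for the Sstc path, where the hrtimer is only armed
 * while the vcpu is blocked: hardware cannot wake a descheduled vcpu,
 * so kick it once the guest's compare value is reached (re-arming
 * first if we fired early).
 */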
static enum hrtimer_restart kvm_riscv_vcpu_vstimer_expired(struct hrtimer *h)
{
	u64 delta_ns;
	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
		return HRTIMER_RESTART;
	}

	t->next_set = false;
	kvm_vcpu_kick(vcpu);

	return HRTIMER_NORESTART;
}

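/*
 * A timer event is pending if the compare value has already been
 * reached (delta is zero) or a VS timer interrupt is queued.
 */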
bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

	if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) ||
	    kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER))
		return true;
	else
		return false;
}

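/*
 * Arm the host hrtimer before the vcpu blocks so it gets woken when
 * the guest's compare value is reached; unblocking cancels the timer
 * again, since hardware takes over once the vcpu is running.
 */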
static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 delta_ns;

	if (!t->init_done)
		return;

	delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
	t->next_set = true;
}

static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
}

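/*
 * KVM_GET_ONE_REG handler for the RISC-V timer register group:
 * copies the timer frequency, current time, compare value, or armed
 * state out to userspace.
 */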
int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
				 const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_TIMER);
	u64 reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(u64))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
		return -ENOENT;

	switch (reg_num) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		reg_val = riscv_timebase;
		break;
	case KVM_REG_RISCV_TIMER_REG(time):
		reg_val = kvm_riscv_current_cycles(gt);
		break;
	case KVM_REG_RISCV_TIMER_REG(compare):
		reg_val = t->next_cycles;
		break;
	case KVM_REG_RISCV_TIMER_REG(state):
		reg_val = (t->next_set) ? KVM_RISCV_TIMER_STATE_ON :
					  KVM_RISCV_TIMER_STATE_OFF;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

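/*
 * KVM_SET_ONE_REG handler for the timer register group. The frequency
 * is read-only and must match the host timebase; writing "time"
 * adjusts the per-VM time delta, and writing "state" arms or cancels
 * the timer.
 */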
int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
				 const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_TIMER);
	u64 reg_val;
	int ret = 0;

	if (KVM_REG_SIZE(reg->id) != sizeof(u64))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
		return -ENOENT;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		if (reg_val != riscv_timebase)
			return -EINVAL;
		break;
	case KVM_REG_RISCV_TIMER_REG(time):
		gt->time_delta = reg_val - get_cycles64();
		break;
	case KVM_REG_RISCV_TIMER_REG(compare):
		t->next_cycles = reg_val;
		break;
	case KVM_REG_RISCV_TIMER_REG(state):
		if (reg_val == KVM_RISCV_TIMER_STATE_ON)
			ret = kvm_riscv_vcpu_timer_next_event(vcpu, reg_val);
		else
			ret = kvm_riscv_vcpu_timer_cancel(t);
		break;
	default:
		ret = -ENOENT;
		break;
	}

	return ret;
}

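/*
 * One-time vcpu timer setup: pick the Sstc (vstimecmp) backend when
 * the extension is available in hardware, otherwise fall back to full
 * hrtimer emulation.
 */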
int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (t->init_done)
		return -EINVAL;

	hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t->init_done = true;
	t->next_set = false;

	/* Enable sstc for every vcpu if available in hardware */
	if (riscv_isa_extension_available(NULL, SSTC)) {
		t->sstc_enabled = true;
		t->hrt.function = kvm_riscv_vcpu_vstimer_expired;
		t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
	} else {
		t->sstc_enabled = false;
		t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
		t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
	}

	return 0;
}

int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
{
	int ret;

	ret = kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
	vcpu->arch.timer.init_done = false;

	return ret;
}

int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	t->next_cycles = -1ULL;
	return kvm_riscv_vcpu_timer_cancel(t);
}

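/*
 * Load the per-VM time delta into htimedelta so the guest reads time
 * as the host cycle counter plus the delta.
 */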
static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;

#if defined(CONFIG_32BIT)
	csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
	csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
#else
	csr_write(CSR_HTIMEDELTA, gt->time_delta);
#endif
}

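/*
 * Restore timer state on vcpu load: always reprogram htimedelta; with
 * Sstc also rewrite vstimecmp and cancel the wakeup hrtimer that may
 * have been armed while the vcpu was blocked.
 */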
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	kvm_riscv_vcpu_update_timedelta(vcpu);

	if (!t->sstc_enabled)
		return;

#if defined(CONFIG_32BIT)
	csr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
	csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
#else
	csr_write(CSR_VSTIMECMP, t->next_cycles);
#endif

	/* timer should be enabled for the remaining operations */
	if (unlikely(!t->init_done))
		return;

	kvm_riscv_vcpu_timer_unblocking(vcpu);
}

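/*
 * Read the guest's vstimecmp back into next_cycles on VM-exit so the
 * software copy tracks whatever the guest last programmed.
 */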
void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (!t->sstc_enabled)
		return;

#if defined(CONFIG_32BIT)
	t->next_cycles = csr_read(CSR_VSTIMECMP);
	t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
#else
	t->next_cycles = csr_read(CSR_VSTIMECMP);
#endif
}

void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (!t->sstc_enabled)
		return;

	/*
	 * The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync()
	 * upon every VM exit so no need to save here.
	 */

	/* timer should be enabled for the remaining operations */
	if (unlikely(!t->init_done))
		return;

	if (kvm_vcpu_is_blocking(vcpu))
		kvm_riscv_vcpu_timer_blocking(vcpu);
}

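/*
 * Per-VM timer setup: cache the clocksource mult/shift pair for
 * cycles-to-ns conversion, and zero guest time at VM creation by
 * setting the delta to the negated current cycle count.
 */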
void kvm_riscv_guest_timer_init(struct kvm *kvm)
{
	struct kvm_guest_timer *gt = &kvm->arch.timer;

	riscv_cs_get_mult_shift(&gt->nsec_mult, &gt->nsec_shift);
	gt->time_delta = -get_cycles64();
}