/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
struct mm_struct;
#endif

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/nospec-branch.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/static_call_types.h>
#include <asm/frame.h>

u64 dummy_steal_clock(int cpu);
u64 dummy_sched_clock(void);

DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
DECLARE_STATIC_CALL(pv_sched_clock, dummy_sched_clock);

void paravirt_set_sched_clock(u64 (*func)(void));

static __always_inline u64 paravirt_sched_clock(void)
{
	return static_call(pv_sched_clock)();
}
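
/*
 * Illustrative sketch (not part of this header's API): a guest clock
 * driver installs its own scheduler clock via the setter above.
 * "my_sched_clock" and "my_read_guest_time_ns" are hypothetical
 * helpers returning nanoseconds since boot:
 *
 *	static u64 my_sched_clock(void)
 *	{
 *		return my_read_guest_time_ns();
 *	}
 *	...
 *	paravirt_set_sched_clock(my_sched_clock);
 */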

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

__visible void __native_queued_spin_unlock(struct qspinlock *lock);
bool pv_is_native_spin_unlock(void);
__visible bool __native_vcpu_is_preempted(long cpu);
bool pv_is_native_vcpu_is_preempted(void);

static inline u64 paravirt_steal_clock(int cpu)
{
	return static_call(pv_steal_clock)(cpu);
}
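
/*
 * Illustrative sketch (hypothetical "my_steal_clock" and "stolen_ns"):
 * a guest that can report stolen time updates the static call and
 * enables the keys declared above, e.g.:
 *
 *	static u64 my_steal_clock(int cpu)
 *	{
 *		return per_cpu(stolen_ns, cpu);
 *	}
 *	...
 *	static_call_update(pv_steal_clock, my_steal_clock);
 *	static_key_slow_inc(&paravirt_steal_enabled);
 */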

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init paravirt_set_cap(void);
#endif

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	PVOP_VCALL0(cpu.io_delay);
#ifdef REALLY_SLOW_IO
	PVOP_VCALL0(cpu.io_delay);
	PVOP_VCALL0(cpu.io_delay);
	PVOP_VCALL0(cpu.io_delay);
#endif
}

void native_flush_tlb_local(void);
void native_flush_tlb_global(void);
void native_flush_tlb_one_user(unsigned long addr);
void native_flush_tlb_multi(const struct cpumask *cpumask,
			    const struct flush_tlb_info *info);

static inline void __flush_tlb_local(void)
{
	PVOP_VCALL0(mmu.flush_tlb_user);
}

static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(mmu.flush_tlb_kernel);
}

static inline void __flush_tlb_one_user(unsigned long addr)
{
	PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}

static inline void __flush_tlb_multi(const struct cpumask *cpumask,
				     const struct flush_tlb_info *info)
{
	PVOP_VCALL2(mmu.flush_tlb_multi, cpumask, info);
}
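
/*
 * Illustrative sketch (hypothetical "my_flush_tlb_multi"): a guest
 * that can flush remote TLBs with a single hypercall overrides the
 * corresponding pv_ops slot during early setup:
 *
 *	static void my_flush_tlb_multi(const struct cpumask *cpumask,
 *				       const struct flush_tlb_info *info)
 *	{
 *		...one hypercall instead of per-CPU IPIs...
 *	}
 *	...
 *	pv_ops.mmu.flush_tlb_multi = my_flush_tlb_multi;
 */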

static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(mmu.exit_mmap, mm);
}

static inline void notify_page_enc_status_changed(unsigned long pfn,
						  int npages, bool enc)
{
	PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc);
}

#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
	PVOP_VCALL1(cpu.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
}
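
/*
 * Illustrative use of the accessor above: all four registers are
 * in/out parameters, so the caller seeds EAX (and ECX for sub-leaves)
 * before the call, e.g. querying leaf 1:
 *
 *	unsigned int eax = 1, ebx = 0, ecx = 0, edx = 0;
 *
 *	__cpuid(&eax, &ebx, &ecx, &edx);
 */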

/*
 * These special macros can be used to get or set a debugging register
 */
static __always_inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static __always_inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(cpu.set_debugreg, reg, val);
}
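
/*
 * Illustrative use of the accessors above: read DR7, then write a
 * (here unchanged) value back. Note the reversed argument order
 * between the two: get_debugreg(var, reg) vs. set_debugreg(val, reg).
 *
 *	unsigned long dr7;
 *
 *	get_debugreg(dr7, 7);
 *	set_debugreg(dr7, 7);
 */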

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, cpu.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr0, x);
}

static __always_inline unsigned long read_cr2(void)
{
	return PVOP_ALT_CALLEE0(unsigned long, mmu.read_cr2,
				"mov %%cr2, %%rax;", ALT_NOT_XEN);
}

static __always_inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(mmu.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
	return PVOP_ALT_CALL0(unsigned long, mmu.read_cr3,
			      "mov %%cr3, %%rax;", ALT_NOT_XEN);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_ALT_VCALL1(mmu.write_cr3, x, "mov %%rdi, %%cr3", ALT_NOT_XEN);
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr4, x);
}

static __always_inline void arch_safe_halt(void)
{
	PVOP_VCALL0(irq.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(irq.halt);
}

extern noinstr void pv_native_wbinvd(void);

static __always_inline void wbinvd(void)
{
	PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT_XEN);
}

static inline u64 paravirt_read_msr(unsigned msr)
{
	return PVOP_CALL1(u64, cpu.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
				      unsigned low, unsigned high)
{
	PVOP_VCALL3(cpu.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
					  unsigned low, unsigned high)
{
	return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_safe(msr, &err);
	return err;
}
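
/*
 * Illustrative use of the MSR wrappers above (MSR_EFER comes from
 * <asm/msr-index.h>; any architectural MSR would do). rdmsr() splits
 * the value 32:32, rdmsrl() returns it whole, and the _safe variants
 * report a fault instead of oopsing:
 *
 *	u32 lo, hi;
 *	u64 efer;
 *
 *	rdmsr(MSR_EFER, lo, hi);
 *	rdmsrl(MSR_EFER, efer);
 *	if (wrmsr_safe(MSR_EFER, lo, hi))
 *		pr_warn("EFER write faulted\n");
 */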

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, cpu.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
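
/*
 * Illustrative use of the PMC helpers above (counter index 0 is
 * arbitrary; which counters exist depends on the PMU):
 *
 *	u64 count;
 *
 *	rdpmcl(0, count);
 */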

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(cpu.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(cpu.set_ldt, addr, entries);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, cpu.store_tr);
}

#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(cpu.load_tls, t, cpu);
}

static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(cpu.load_gs_index, gs);
}

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void tss_invalidate_io_bitmap(void)
{
	PVOP_VCALL0(cpu.invalidate_io_bitmap);
}

static inline void tss_update_io_bitmap(void)
{
	PVOP_VCALL0(cpu.update_io_bitmap);
}
#endif

static inline void paravirt_enter_mmap(struct mm_struct *next)
{
	PVOP_VCALL1(mmu.enter_mmap, next);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, mmu.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(mmu.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_p4d, pfn);
}

static inline pte_t __pte(pteval_t val)
{
	return (pte_t) { PVOP_ALT_CALLEE1(pteval_t, mmu.make_pte, val,
					  "mov %%rdi, %%rax", ALT_NOT_XEN) };
}

static inline pteval_t pte_val(pte_t pte)
{
	return PVOP_ALT_CALLEE1(pteval_t, mmu.pte_val, pte.pte,
				"mov %%rdi, %%rax", ALT_NOT_XEN);
}

static inline pgd_t __pgd(pgdval_t val)
{
	return (pgd_t) { PVOP_ALT_CALLEE1(pgdval_t, mmu.make_pgd, val,
					  "mov %%rdi, %%rax", ALT_NOT_XEN) };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	return PVOP_ALT_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd,
				"mov %%rdi, %%rax", ALT_NOT_XEN);
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{

	PVOP_VCALL4(mmu.ptep_modify_prot_commit, vma, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	PVOP_VCALL2(mmu.set_pmd, pmdp, native_pmd_val(pmd));
}

static inline pmd_t __pmd(pmdval_t val)
{
	return (pmd_t) { PVOP_ALT_CALLEE1(pmdval_t, mmu.make_pmd, val,
					  "mov %%rdi, %%rax", ALT_NOT_XEN) };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	return PVOP_ALT_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd,
				"mov %%rdi, %%rax", ALT_NOT_XEN);
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	PVOP_VCALL2(mmu.set_pud, pudp, native_pud_val(pud));
}

static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	ret = PVOP_ALT_CALLEE1(pudval_t, mmu.make_pud, val,
			       "mov %%rdi, %%rax", ALT_NOT_XEN);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	return PVOP_ALT_CALLEE1(pudval_t, mmu.pud_val, pud.pud,
				"mov %%rdi, %%rax", ALT_NOT_XEN);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, native_make_pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	p4dval_t val = native_p4d_val(p4d);

	PVOP_VCALL2(mmu.set_p4d, p4dp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
	p4dval_t ret = PVOP_ALT_CALLEE1(p4dval_t, mmu.make_p4d, val,
					"mov %%rdi, %%rax", ALT_NOT_XEN);

	return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
	return PVOP_ALT_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d,
				"mov %%rdi, %%rax", ALT_NOT_XEN);
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
}

#define set_pgd(pgdp, pgdval) do {					\
	if (pgtable_l5_enabled())					\
		__set_pgd(pgdp, pgdval);				\
	else								\
		set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });	\
} while (0)

#define pgd_clear(pgdp) do {						\
	if (pgtable_l5_enabled())					\
		set_pgd(pgdp, native_make_pgd(0));			\
} while (0)

#endif /* CONFIG_PGTABLE_LEVELS >= 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, native_make_p4d(0));
}

static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte(ptep, native_make_pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, native_make_pmd(0));
}

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(cpu.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(cpu.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_ops.mmu.set_fixmap(idx, phys, flags);
}
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							 u32 val)
{
	PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_ALT_VCALLEE1(lock.queued_spin_unlock, lock,
			  "movb $0, (%%" _ASM_ARG1 ");",
			  ALT_NOT(X86_FEATURE_PVUNLOCK));
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(lock.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(lock.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_ALT_CALLEE1(bool, lock.vcpu_is_preempted, cpu,
				"xor %%" _ASM_AX ", %%" _ASM_AX ";",
				ALT_NOT(X86_FEATURE_VCPUPREEMPT));
}

void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
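
/*
 * Illustrative sketch of how a hypervisor guest wires up the lock ops
 * used by the helpers above ("my_wait" and "my_kick" are hypothetical
 * hypercall wrappers; the pattern runs from the guest's SMP init code):
 *
 *	__pv_init_lock_hash();
 *	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 *	pv_ops.lock.queued_spin_unlock =
 *		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
 *	pv_ops.lock.wait = my_wait;
 *	pv_ops.lock.kick = my_kick;
 */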

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl %ecx;"
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define __PV_CALLEE_SAVE_REGS_THUNK(func, section)			\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection " section ", \"ax\";"				\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    ASM_FUNC_ALIGN						\
	    PV_THUNK_NAME(func) ":"					\
	    ASM_ENDBR							\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    ASM_RET							\
	    ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";"\
	    ".popsection")

#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	__PV_CALLEE_SAVE_REGS_THUNK(func, ".text")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)					\
	((struct paravirt_callee_save) { func })
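
/*
 * Illustrative sketch (hypothetical "my_vcpu_is_preempted"): wrapping
 * a plain C helper in a callee-save thunk and plugging the thunk into
 * pv_ops via PV_CALLEE_SAVE():
 *
 *	__visible bool my_vcpu_is_preempted(long cpu)
 *	{
 *		return false;
 *	}
 *	PV_CALLEE_SAVE_REGS_THUNK(my_vcpu_is_preempted);
 *	...
 *	pv_ops.lock.vcpu_is_preempted =
 *		PV_CALLEE_SAVE(my_vcpu_is_preempted);
 */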

#ifdef CONFIG_PARAVIRT_XXL
static __always_inline unsigned long arch_local_save_flags(void)
{
	return PVOP_ALT_CALLEE0(unsigned long, irq.save_fl, "pushf; pop %%rax;",
				ALT_NOT_XEN);
}

static __always_inline void arch_local_irq_disable(void)
{
	PVOP_ALT_VCALLEE0(irq.irq_disable, "cli;", ALT_NOT_XEN);
}

static __always_inline void arch_local_irq_enable(void)
{
	PVOP_ALT_VCALLEE0(irq.irq_enable, "sti;", ALT_NOT_XEN);
}

static __always_inline unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
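
/*
 * Illustrative pairing of the helpers above; the matching
 * arch_local_irq_restore() is provided by <asm/irqflags.h>:
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();
 *	... code that must not be interrupted ...
 *	arch_local_irq_restore(flags);
 */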
#endif


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);
void native_pv_lock_init(void) __init;

#else /* __ASSEMBLY__ */

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
#ifdef CONFIG_DEBUG_ENTRY

#define PARA_INDIRECT(addr)	*addr(%rip)

.macro PARA_IRQ_save_fl
	ANNOTATE_RETPOLINE_SAFE;
	call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);
.endm

#define SAVE_FLAGS ALTERNATIVE_2 "PARA_IRQ_save_fl;",			\
				 "ALT_CALL_INSTR;", ALT_CALL_ALWAYS,	\
				 "pushf; pop %rax;", ALT_NOT_XEN
#endif
#endif /* CONFIG_PARAVIRT_XXL */
#endif /* CONFIG_X86_64 */

#endif /* __ASSEMBLY__ */
#else /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop

#ifndef __ASSEMBLY__
static inline void native_pv_lock_init(void)
{
}
#endif
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_enter_mmap(struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT_SPINLOCKS
static inline void paravirt_set_cap(void)
{
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_H */