/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_EDI  (1 << 3)

#ifdef CONFIG_X86_32
/* CLBR_ANY should match all registers the platform has.  For i386, that's just these. */
#define CLBR_ANY  ((1 << 4) - 1)

#define CLBR_ARG_REGS	(CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG	(CLBR_EAX | CLBR_EDX)
#else
#define CLBR_RAX  CLBR_EAX
#define CLBR_RCX  CLBR_ECX
#define CLBR_RDX  CLBR_EDX
#define CLBR_RDI  CLBR_EDI
#define CLBR_RSI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)

#define CLBR_ANY  ((1 << 9) - 1)

#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | \
			 CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG	(CLBR_RAX)

#endif /* X86_64 */

#ifndef __ASSEMBLY__

#include <asm/desc_defs.h>
#include <asm/pgtable_types.h>
#include <asm/nospec-branch.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;
struct flush_tlb_info;
struct mmu_gather;
struct vm_area_struct;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK below.
 */
struct paravirt_callee_save {
	void *func;
};
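
/*
 * Illustrative sketch (not part of the original header): a callee-save hook
 * is reached through the PVOP_CALLEE*() macros defined further down, so a
 * caller-side wrapper would look roughly like the hypothetical example
 * below; the real wrappers live in <asm/paravirt.h>.
 *
 *	static __always_inline unsigned long example_save_fl(void)
 *	{
 *		return PVOP_CALLEE0(unsigned long, irq.save_fl);
 *	}
 */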

/* general info */
struct pv_info {
#ifdef CONFIG_PARAVIRT_XXL
	u16 extra_user_64bit_cs;  /* __USER_CS if none */
#endif

	const char *name;
};

#ifdef CONFIG_PARAVIRT_XXL
struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
	void (*flush)(void);
} __no_randomize_layout;
#endif

struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	void (*io_delay)(void);

#ifdef CONFIG_PARAVIRT_XXL
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	void (*write_cr4)(unsigned long);

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
	void (*load_gs_index)(unsigned int idx);
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(unsigned long sp0);

#ifdef CONFIG_X86_IOPL_IOPERM
	void (*invalidate_io_bitmap)(void);
	void (*update_io_bitmap)(void);
#endif

	void (*wbinvd)(void);

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	/* Unsafe MSR operations.  These will warn or panic on failure. */
	u64 (*read_msr)(unsigned int msr);
	void (*write_msr)(unsigned int msr, unsigned low, unsigned high);

	/*
	 * Safe MSR operations.
	 * read sets err to 0 or -EIO.  write returns 0 or -EIO.
	 */
	u64 (*read_msr_safe)(unsigned int msr, int *err);
	int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high);

	u64 (*read_pmc)(int counter);

	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
#endif
} __no_randomize_layout;

struct pv_irq_ops {
#ifdef CONFIG_PARAVIRT_XXL
	/*
	 * Get/set interrupt state.  save_fl is expected to use X86_EFLAGS_IF;
	 * all other bits returned from save_fl are undefined.
	 *
	 * NOTE: Callers of these functions expect the callee to preserve
	 * more registers than the standard C calling convention.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;

	void (*safe_halt)(void);
	void (*halt)(void);
#endif
} __no_randomize_layout;

struct pv_mmu_ops {
	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_one_user)(unsigned long addr);
	void (*flush_tlb_multi)(const struct cpumask *cpus,
				const struct flush_tlb_info *info);

	void (*tlb_remove_table)(struct mmu_gather *tlb, void *table);

	/* Hook for intercepting the destruction of an mm_struct. */
	void (*exit_mmap)(struct mm_struct *mm);
	void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc);

#ifdef CONFIG_PARAVIRT_XXL
	struct paravirt_callee_save read_cr2;
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/* Hooks for intercepting the creation/use of an mm_struct. */
	void (*activate_mm)(struct mm_struct *prev,
			    struct mm_struct *next);
	void (*dup_mmap)(struct mm_struct *oldmm,
			 struct mm_struct *mm);

	/* Hooks for allocating and freeing a pagetable top-level */
	int  (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable
	 */
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_p4d)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);
	void (*release_p4d)(unsigned long pfn);

	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);

	pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep, pte_t pte);

	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;

	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_p4d)(p4d_t *p4dp, p4d_t p4dval);

#if CONFIG_PGTABLE_LEVELS >= 5
	struct paravirt_callee_save p4d_val;
	struct paravirt_callee_save make_p4d;

	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif	/* CONFIG_PGTABLE_LEVELS >= 5 */

	struct pv_lazy_ops lazy_mode;

	/* dom0 ops */

	/* Sometimes the physical address is a pfn, and sometimes it's
	   an mfn.  We can tell which is which from the index. */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   phys_addr_t phys, pgprot_t flags);
#endif
} __no_randomize_layout;

struct arch_spinlock;
#ifdef CONFIG_SMP
#include <asm/spinlock_types.h>
#endif

struct qspinlock;

struct pv_lock_ops {
	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
	struct paravirt_callee_save queued_spin_unlock;

	void (*wait)(u8 *ptr, u8 val);
	void (*kick)(int cpu);

	struct paravirt_callee_save vcpu_is_preempted;
} __no_randomize_layout;

/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template {
	struct pv_cpu_ops	cpu;
	struct pv_irq_ops	irq;
	struct pv_mmu_ops	mmu;
	struct pv_lock_ops	lock;
} __no_randomize_layout;

extern struct pv_info pv_info;
extern struct paravirt_patch_template pv_ops;

#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
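
/*
 * Illustrative example (the specific hook is chosen here for illustration,
 * not taken from the original text): the patch type number is simply the
 * pointer-slot index of the hook within paravirt_patch_template, e.g.
 *
 *	PARAVIRT_PATCH(mmu.read_cr3)
 *		== offsetof(struct paravirt_patch_template, mmu.read_cr3)
 *		   / sizeof(void *)
 *
 * so the patcher can convert that number back into the offset of
 * pv_ops.mmu.read_cr3 to find the target function.
 */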

#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "m" (pv_ops.op)
#define paravirt_clobber(clobber)			\
	[paravirt_clobber] "i" (clobber)

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	_ASM_ALIGN "\n"					\
	_ASM_PTR " 771b\n"				\
	" .byte " type "\n"				\
	" .byte 772b-771b\n"				\
	" .short " clobber "\n"				\
	".popsection\n"
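
/*
 * Rough sketch of what one call site records (an illustration assuming the
 * usual field sizes; compare struct paravirt_patch_site below): the
 * instruction bytes between labels 771 and 772 go into the normal text
 * section, and one .parainstructions entry is emitted of the form
 *
 *	.quad  771b		// address of the original instruction(s)
 *	.byte  <type>		// PARAVIRT_PATCH() slot number
 *	.byte  772b-771b	// length of the original instruction(s)
 *	.short <clobber>	// CLBR_* mask the patched code may clobber
 *
 * (_ASM_PTR emits .quad on 64-bit and .long on 32-bit).
 */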

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)					\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Simple instruction patching code. */
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"

unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr, unsigned int len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL					\
	ANNOTATE_RETPOLINE_SAFE				\
	"call *%[paravirt_opptr];"

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or,
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately are quite a few (r8 - r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
 * i386 passes them in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
#ifdef CONFIG_X86_32
#define PVOP_CALL_ARGS							\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else  /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_CALL_ARGS						\
	unsigned long __edi = __edi, __esi = __esi,		\
		      __edx = __edx, __ecx = __ecx, __eax = __eax;

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

/* void functions are still allowed [re]ax for scratch */
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif	/* CONFIG_X86_32 */

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(pv_ops.op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)pv_ops.op)
#endif

#define PVOP_RETVAL(rettype)						\
	({	unsigned long __mask = ~0UL;				\
		BUILD_BUG_ON(sizeof(rettype) > sizeof(unsigned long));	\
		switch (sizeof(rettype)) {				\
		case 1: __mask =       0xffUL; break;			\
		case 2: __mask =     0xffffUL; break;			\
		case 4: __mask = 0xffffffffUL; break;			\
		default: break;						\
		}							\
		__mask & __eax;						\
	})
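
/*
 * Illustrative note (editorial assumption about intent, not original text):
 * only the low-order bytes of %[er]ax covered by the declared return type
 * are kept, so a hook declared to return a one-byte type behaves as if it
 * ended with
 *
 *	return (u8)(__eax & 0xffUL);
 *
 * which is why callers must pass the real C return type to PVOP_CALL*().
 */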


#define ____PVOP_CALL(ret, op, clbr, call_clbr, extra_clbr, ...)	\
	({								\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		asm volatile(paravirt_alt(PARAVIRT_CALL)		\
			     : call_clbr, ASM_CALL_CONSTRAINT		\
			     : paravirt_type(op),			\
			       paravirt_clobber(clbr),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
		ret;							\
	})

#define ____PVOP_ALT_CALL(ret, op, alt, cond, clbr, call_clbr,		\
			  extra_clbr, ...)				\
	({								\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		asm volatile(ALTERNATIVE(paravirt_alt(PARAVIRT_CALL),	\
					 alt, cond)			\
			     : call_clbr, ASM_CALL_CONSTRAINT		\
			     : paravirt_type(op),			\
			       paravirt_clobber(clbr),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
		ret;							\
	})

#define __PVOP_CALL(rettype, op, ...)					\
	____PVOP_CALL(PVOP_RETVAL(rettype), op, CLBR_ANY,		\
		      PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS, ##__VA_ARGS__)

#define __PVOP_ALT_CALL(rettype, op, alt, cond, ...)			\
	____PVOP_ALT_CALL(PVOP_RETVAL(rettype), op, alt, cond, CLBR_ANY,\
			  PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS,		\
			  ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, ...)				\
	____PVOP_CALL(PVOP_RETVAL(rettype), op.func, CLBR_RET_REG,	\
		      PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)

#define __PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, ...)		\
	____PVOP_ALT_CALL(PVOP_RETVAL(rettype), op.func, alt, cond,	\
			  CLBR_RET_REG, PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)


#define __PVOP_VCALL(op, ...)						\
	(void)____PVOP_CALL(, op, CLBR_ANY, PVOP_VCALL_CLOBBERS,	\
			    VEXTRA_CLOBBERS, ##__VA_ARGS__)

#define __PVOP_ALT_VCALL(op, alt, cond, ...)				\
	(void)____PVOP_ALT_CALL(, op, alt, cond, CLBR_ANY,		\
				PVOP_VCALL_CLOBBERS, VEXTRA_CLOBBERS,	\
				##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, ...)					\
	(void)____PVOP_CALL(, op.func, CLBR_RET_REG,			\
			    PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)

#define __PVOP_ALT_VCALLEESAVE(op, alt, cond, ...)			\
	(void)____PVOP_ALT_CALL(, op.func, alt, cond, CLBR_RET_REG,	\
				PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)


#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op)
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op)
#define PVOP_ALT_CALL0(rettype, op, alt, cond)				\
	__PVOP_ALT_CALL(rettype, op, alt, cond)
#define PVOP_ALT_VCALL0(op, alt, cond)					\
	__PVOP_ALT_VCALL(op, alt, cond)

#define PVOP_CALLEE0(rettype, op)					\
	__PVOP_CALLEESAVE(rettype, op)
#define PVOP_VCALLEE0(op)						\
	__PVOP_VCALLEESAVE(op)
#define PVOP_ALT_CALLEE0(rettype, op, alt, cond)			\
	__PVOP_ALT_CALLEESAVE(rettype, op, alt, cond)
#define PVOP_ALT_VCALLEE0(op, alt, cond)				\
	__PVOP_ALT_VCALLEESAVE(op, alt, cond)


#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_VCALL1(op, arg1, alt, cond)				\
	__PVOP_ALT_VCALL(op, alt, cond, PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)					\
	__PVOP_CALLEESAVE(rettype, op, PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)						\
	__PVOP_VCALLEESAVE(op, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_CALLEE1(rettype, op, arg1, alt, cond)			\
	__PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_VCALLEE1(op, arg1, alt, cond)				\
	__PVOP_ALT_VCALLEESAVE(op, alt, cond, PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1),			\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1),				\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
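
/*
 * Usage sketch (illustrative only; the real inline wrappers live in
 * <asm/paravirt.h>, and the names below are made up for the example):
 * every use of these macros is wrapped in a small inline function so the
 * argument and return types are checked by the compiler, e.g.
 *
 *	static inline unsigned long example_read_cr3(void)
 *	{
 *		return PVOP_CALL0(unsigned long, mmu.read_cr3);
 *	}
 *
 *	static inline void example_write_cr3(unsigned long x)
 *	{
 *		PVOP_VCALL1(mmu.write_cr3, x);
 *	}
 */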

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);

void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_flush_lazy_mmu(void);

void _paravirt_nop(void);
void paravirt_BUG(void);
u64 _paravirt_ident_64(u64);
unsigned long paravirt_ret0(void);

#define paravirt_nop	((void *)_paravirt_nop)

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;		/* original instructions */
	u8 type;		/* type of this instruction */
	u8 len;			/* length of original instruction */
};

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];
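
/*
 * Loosely paraphrased sketch (approximate, not copied from the kernel) of
 * how these records are consumed: apply_paravirt() in
 * arch/x86/kernel/alternative.c walks the table and rewrites each call
 * site through paravirt_patch(), roughly along the lines of
 *
 *	struct paravirt_patch_site *p;
 *	u8 insn_buff[64];	// hypothetical scratch buffer
 *
 *	for (p = __parainstructions; p < __parainstructions_end; p++) {
 *		unsigned int used;
 *
 *		used = paravirt_patch(p->type, insn_buff,
 *				      (unsigned long)p->instr, p->len);
 *		// pad insn_buff from 'used' to p->len with NOPs, then
 *		// copy it back over the original instruction bytes
 *	}
 */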

#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PARAVIRT_TYPES_H */