/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H

#ifdef CONFIG_PARAVIRT

#ifndef __ASSEMBLER__
#include <linux/types.h>

#include <asm/desc_defs.h>
#include <asm/pgtable_types.h>
#include <asm/nospec-branch.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;
struct flush_tlb_info;
struct mmu_gather;
struct vm_area_struct;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALL_SAVE_REGS_THUNK below.
 */
struct paravirt_callee_save {
	void *func;
};
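
/*
 * Illustrative sketch (an assumption, not part of this header): paravirt.h
 * wraps an ordinary C function into this type with a macro along the lines
 * of:
 *
 *	#define PV_CALLEE_SAVE(func)					\
 *		((struct paravirt_callee_save) { __raw_callee_save_##func })
 *
 * where __raw_callee_save_##func is an asm thunk that saves and restores
 * the extra registers around a call to func.
 */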

/* general info */
struct pv_info {
#ifdef CONFIG_PARAVIRT_XXL
	u16 extra_user_64bit_cs;  /* __USER_CS if none */
#endif

	const char *name;
};

#ifdef CONFIG_PARAVIRT_XXL
struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
	void (*flush)(void);
} __no_randomize_layout;
#endif
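
/*
 * A hedged usage sketch: a hypervisor backend can batch a run of
 * page-table updates by bracketing them with the lazy-mode hooks,
 * roughly:
 *
 *	pv_ops.mmu.lazy_mode.enter();	// start queueing updates
 *	...set_pte()/set_pmd() calls get batched by the backend...
 *	pv_ops.mmu.lazy_mode.leave();	// flush the queue to the hypervisor
 *
 * Real callers go through the arch_{enter,leave}_lazy_mmu_mode()
 * wrappers rather than poking pv_ops directly.
 */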

struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	void (*io_delay)(void);

#ifdef CONFIG_PARAVIRT_XXL
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	void (*write_cr4)(unsigned long);

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
	void (*load_gs_index)(unsigned int idx);
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(unsigned long sp0);

#ifdef CONFIG_X86_IOPL_IOPERM
	void (*invalidate_io_bitmap)(void);
	void (*update_io_bitmap)(void);
#endif

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	/* Unsafe MSR operations.  These will warn or panic on failure. */
	u64 (*read_msr)(u32 msr);
	void (*write_msr)(u32 msr, u64 val);

	/*
	 * Safe MSR operations.
	 * Returns 0 or -EIO.
	 */
	int (*read_msr_safe)(u32 msr, u64 *val);
	int (*write_msr_safe)(u32 msr, u64 val);

	u64 (*read_pmc)(int counter);

	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
#endif
} __no_randomize_layout;

struct pv_irq_ops {
#ifdef CONFIG_PARAVIRT_XXL
	/*
	 * Get/set interrupt state.  save_fl is expected to use X86_EFLAGS_IF;
	 * all other bits returned from save_fl are undefined.
	 *
	 * NOTE: Callers of these functions expect the callee to preserve
	 * more registers than the standard C calling convention.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;
#endif
	void (*safe_halt)(void);
	void (*halt)(void);
} __no_randomize_layout;
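
/*
 * A minimal sketch, assuming the __PV_IS_CALLEE_SAVE() helper from
 * paravirt.h: the native backend wires the callee-save slots up to asm
 * stubs such as pv_native_save_fl() (declared near the end of this
 * header), e.g.:
 *
 *	.irq.save_fl = __PV_IS_CALLEE_SAVE(pv_native_save_fl),
 *
 * A hypervisor guest like Xen PV substitutes its own thunks at boot.
 */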

struct pv_mmu_ops {
	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_one_user)(unsigned long addr);
	void (*flush_tlb_multi)(const struct cpumask *cpus,
				const struct flush_tlb_info *info);

	/* Hook for intercepting the destruction of an mm_struct. */
	void (*exit_mmap)(struct mm_struct *mm);
	void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc);

#ifdef CONFIG_PARAVIRT_XXL
	struct paravirt_callee_save read_cr2;
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/* Hook for intercepting the creation/use of an mm_struct. */
	void (*enter_mmap)(struct mm_struct *mm);

	/* Hooks for allocating and freeing a pagetable top-level */
	int (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable
	 */
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_p4d)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);
	void (*release_p4d)(unsigned long pfn);

	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);

	pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep, pte_t pte);

	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;

	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_p4d)(p4d_t *p4dp, p4d_t p4dval);

	struct paravirt_callee_save p4d_val;
	struct paravirt_callee_save make_p4d;

	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);

	struct pv_lazy_ops lazy_mode;

	/* dom0 ops */

	/*
	 * Sometimes the physical address is a pfn, and sometimes it's
	 * an mfn.  We can tell which is which from the index.
	 */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   phys_addr_t phys, pgprot_t flags);
#endif
} __no_randomize_layout;

struct arch_spinlock;
#ifdef CONFIG_SMP
#include <asm/spinlock_types.h>
#endif

struct qspinlock;

struct pv_lock_ops {
	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
	struct paravirt_callee_save queued_spin_unlock;

	void (*wait)(u8 *ptr, u8 val);
	void (*kick)(int cpu);

	struct paravirt_callee_save vcpu_is_preempted;
} __no_randomize_layout;
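
/*
 * A hedged example: a guest that supports paravirt spinlocks points these
 * hooks at its own slowpath and wait/kick primitives during early boot,
 * along the lines of what the KVM guest code does:
 *
 *	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 *	pv_ops.lock.wait = kvm_wait;
 *	pv_ops.lock.kick = kvm_kick_cpu;
 *
 * The exact function names are illustrative; see arch/x86/kernel/kvm.c.
 */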

/*
 * This contains all the paravirt structures: we get a convenient number
 * for each function from its offset, which we use to indicate what to
 * patch.
 */
struct paravirt_patch_template {
	struct pv_cpu_ops	cpu;
	struct pv_irq_ops	irq;
	struct pv_mmu_ops	mmu;
	struct pv_lock_ops	lock;
} __no_randomize_layout;

extern struct pv_info pv_info;
extern struct paravirt_patch_template pv_ops;
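
/*
 * A minimal sketch of how a backend overrides an op at boot (names are
 * illustrative; the Xen and KVM guest setup code does the equivalent):
 *
 *	static void xen_io_delay(void) { }
 *
 *	static void __init xen_init_ops(void)
 *	{
 *		pv_ops.cpu.io_delay = xen_io_delay;
 *	}
 *
 * After the alternatives pass runs, call sites emitted through the PVOP_*
 * macros below call the new target directly.
 */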

#define paravirt_ptr(op)	[paravirt_opptr] "m" (pv_ops.op)

/*
 * This generates an indirect call based on the operation type number.
 *
 * Since alternatives run after enabling CET/IBT -- the latter setting/clearing
 * capabilities and the former requiring all capabilities being finalized --
 * these indirect calls are subject to IBT and the paravirt stubs should have
 * ENDBR on.
 *
 * OTOH since this is effectively a __nocfi indirect call, the paravirt stubs
 * don't need to bother with CFI prefixes.
 */
#define PARAVIRT_CALL					\
	ANNOTATE_RETPOLINE_SAFE				\
	"call *%[paravirt_opptr];"

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because the CPU cannot necessarily predict the destination address.
 * In this case, the address is a runtime constant, so at
 * the very least we can patch the call to a simple direct call, or,
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately is quite a few (%r8-%r11).
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do it.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
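
/*
 * A hedged sketch of the intended usage pattern (paravirt.h holds the
 * real wrappers; these two are representative, not verbatim):
 *
 *	static __always_inline unsigned long __read_cr3(void)
 *	{
 *		return PVOP_CALL0(unsigned long, mmu.read_cr3);
 *	}
 *
 *	static __always_inline void write_cr3(unsigned long x)
 *	{
 *		PVOP_VCALL1(mmu.write_cr3, x);
 *	}
 *
 * The inline wrapper enforces the argument and return types; the macro
 * body below only sees unsigned longs in registers.
 */
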
#ifdef CONFIG_X86_32
#define PVOP_CALL_ARGS							\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else  /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_CALL_ARGS							\
	unsigned long __edi = __edi, __esi = __esi,			\
		__edx = __edx, __ecx = __ecx, __eax = __eax;

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

/*
 * void functions are still allowed [re]ax for scratch.
 *
 * The ZERO_CALL_USED_REGS feature may end up zeroing out callee-saved
 * registers.  Make sure we model this with the appropriate clobbers.
 */
#ifdef CONFIG_ZERO_CALL_USED_REGS
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax), PVOP_VCALL_CLOBBERS
#else
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#endif
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS		, "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS		, "rax", "r8", "r9", "r10", "r11"
#endif	/* CONFIG_X86_32 */

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(pv_ops.op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)pv_ops.op)
#endif

#define PVOP_RETVAL(rettype)						\
	({	unsigned long __mask = ~0UL;				\
		BUILD_BUG_ON(sizeof(rettype) > sizeof(unsigned long));	\
		switch (sizeof(rettype)) {				\
		case 1: __mask =       0xffUL; break;			\
		case 2: __mask =     0xffffUL; break;			\
		case 4: __mask = 0xffffffffUL; break;			\
		default: break;						\
		}							\
		__mask & __eax;						\
	})
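
/*
 * Worked example: for an op declared to return u8, sizeof(rettype) == 1,
 * so __mask becomes 0xffUL and the macro yields (0xffUL & __eax), i.e.
 * only the low byte of the return register is kept.  This matters because
 * the callee only guarantees the low bits of the return register for
 * sub-word return types.
 */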

/*
 * Use alternative patching for paravirt calls:
 * - For replacing an indirect call with a direct one, use the "normal"
 *   ALTERNATIVE() macro with the indirect call as the initial code sequence,
 *   which will be replaced with the related direct call by using the
 *   ALT_FLAG_DIRECT_CALL special case and the "always on" feature.
 * - In case the replacement is either a direct call or a short code sequence
 *   depending on a feature bit, the ALTERNATIVE_2() macro is being used.
 *   The indirect call is the initial code sequence again, while the special
 *   code sequence is selected with the specified feature bit.  In case the
 *   feature is not active, the direct call is used as above via the
 *   ALT_FLAG_DIRECT_CALL special case and the "always on" feature.
 */
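
/*
 * A hedged example of the two-way variant: paravirt.h can implement
 * arch_local_save_flags() so that non-Xen kernels get "pushf; pop %rax"
 * patched inline, while Xen PV keeps a direct call to its save_fl stub:
 *
 *	return PVOP_ALT_CALLEE0(unsigned long, irq.save_fl,
 *				"pushf; pop %%rax;", ALT_NOT_XEN);
 *
 * (Sketch only; the authoritative wrapper lives in asm/paravirt.h.)
 */
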
#define ____PVOP_CALL(ret, op, call_clbr, extra_clbr, ...)		\
	({								\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		asm volatile(ALTERNATIVE(PARAVIRT_CALL, ALT_CALL_INSTR,	\
				ALT_CALL_ALWAYS)			\
			     : call_clbr, ASM_CALL_CONSTRAINT		\
			     : paravirt_ptr(op),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
		ret;							\
	})

#define ____PVOP_ALT_CALL(ret, op, alt, cond, call_clbr,		\
			  extra_clbr, ...)				\
	({								\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		asm volatile(ALTERNATIVE_2(PARAVIRT_CALL,		\
				 ALT_CALL_INSTR, ALT_CALL_ALWAYS,	\
				 alt, cond)				\
			     : call_clbr, ASM_CALL_CONSTRAINT		\
			     : paravirt_ptr(op),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
		ret;							\
	})

#define __PVOP_CALL(rettype, op, ...)					\
	____PVOP_CALL(PVOP_RETVAL(rettype), op,				\
		      PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS, ##__VA_ARGS__)

#define __PVOP_ALT_CALL(rettype, op, alt, cond, ...)			\
	____PVOP_ALT_CALL(PVOP_RETVAL(rettype), op, alt, cond,		\
			  PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS,		\
			  ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, ...)				\
	____PVOP_CALL(PVOP_RETVAL(rettype), op.func,			\
		      PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)

#define __PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, ...)		\
	____PVOP_ALT_CALL(PVOP_RETVAL(rettype), op.func, alt, cond,	\
			  PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)


#define __PVOP_VCALL(op, ...)						\
	(void)____PVOP_CALL(, op, PVOP_VCALL_CLOBBERS,			\
			    VEXTRA_CLOBBERS, ##__VA_ARGS__)

#define __PVOP_ALT_VCALL(op, alt, cond, ...)				\
	(void)____PVOP_ALT_CALL(, op, alt, cond,			\
				PVOP_VCALL_CLOBBERS, VEXTRA_CLOBBERS,	\
				##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, ...)					\
	(void)____PVOP_CALL(, op.func,					\
			    PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)

#define __PVOP_ALT_VCALLEESAVE(op, alt, cond, ...)			\
	(void)____PVOP_ALT_CALL(, op.func, alt, cond,			\
				PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)


#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op)
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op)
#define PVOP_ALT_CALL0(rettype, op, alt, cond)				\
	__PVOP_ALT_CALL(rettype, op, alt, cond)
#define PVOP_ALT_VCALL0(op, alt, cond)					\
	__PVOP_ALT_VCALL(op, alt, cond)

#define PVOP_CALLEE0(rettype, op)					\
	__PVOP_CALLEESAVE(rettype, op)
#define PVOP_VCALLEE0(op)						\
	__PVOP_VCALLEESAVE(op)
#define PVOP_ALT_CALLEE0(rettype, op, alt, cond)			\
	__PVOP_ALT_CALLEESAVE(rettype, op, alt, cond)
#define PVOP_ALT_VCALLEE0(op, alt, cond)				\
	__PVOP_ALT_VCALLEESAVE(op, alt, cond)


#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_VCALL1(op, arg1, alt, cond)				\
	__PVOP_ALT_VCALL(op, alt, cond, PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)					\
	__PVOP_CALLEESAVE(rettype, op, PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)						\
	__PVOP_VCALLEESAVE(op, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_CALLEE1(rettype, op, arg1, alt, cond)			\
	__PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_VCALLEE1(op, arg1, alt, cond)				\
	__PVOP_ALT_VCALLEESAVE(op, alt, cond, PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1),			\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1),				\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))

unsigned long paravirt_ret0(void);
#ifdef CONFIG_PARAVIRT_XXL
u64 _paravirt_ident_64(u64);
unsigned long pv_native_save_fl(void);
void pv_native_irq_disable(void);
void pv_native_irq_enable(void);
unsigned long pv_native_read_cr2(void);
#endif

#define paravirt_nop	((void *)nop_func)

#endif	/* __ASSEMBLER__ */

#define ALT_NOT_XEN	ALT_NOT(X86_FEATURE_XENPV)

#endif	/* CONFIG_PARAVIRT */
#endif	/* _ASM_X86_PARAVIRT_TYPES_H */