1 | /* SPDX-License-Identifier: GPL-2.0 */ |
---|---|
2 | #ifndef _ASM_X86_TLB_H |
3 | #define _ASM_X86_TLB_H |
4 | |
5 | #define tlb_flush tlb_flush |
6 | static inline void tlb_flush(struct mmu_gather *tlb); |
7 | |
8 | #include <asm-generic/tlb.h> |
9 | |
10 | static inline void tlb_flush(struct mmu_gather *tlb) |
11 | { |
12 | unsigned long start = 0UL, end = TLB_FLUSH_ALL; |
13 | unsigned int stride_shift = tlb_get_unmap_shift(tlb); |
14 | |
15 | if (!tlb->fullmm && !tlb->need_flush_all) { |
16 | start = tlb->start; |
17 | end = tlb->end; |
18 | } |
19 | |
20 | flush_tlb_mm_range(mm: tlb->mm, start, end, stride_shift, freed_tables: tlb->freed_tables); |
21 | } |
22 | |
/*
 * While x86 architecture in general requires an IPI to perform TLB
 * shootdown, enablement code for several hypervisors overrides
 * .flush_tlb_others hook in pv_mmu_ops and implements it by issuing
 * a hypercall. To keep software pagetable walkers safe in this case we
 * switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the comment
 * below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
 * for more details.
 */
static inline void __tlb_remove_table(void *table)
{
	free_page_and_swap_cache(table);
}

#endif /* _ASM_X86_TLB_H */