// SPDX-License-Identifier: GPL-2.0+
/*
 * Secure VM platform
 *
 * Copyright 2018 IBM Corporation
 * Author: Anshuman Khandual <khandual@linux.vnet.ibm.com>
 */

#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/cc_platform.h>
#include <asm/machdep.h>
#include <asm/svm.h>
#include <asm/swiotlb.h>
#include <asm/ultravisor.h>
#include <asm/dtl.h>

/*
 * Early setup for a secure (SVM) pseries guest: keep the SWIOTLB bounce
 * buffer, force all DMA through it, and share it with the hypervisor.
 * Does nothing on a non-secure guest.  Always returns 0.
 */
static int __init init_svm(void)
{
	if (!is_secure_guest())
		return 0;

	/* Don't release the SWIOTLB buffer. */
	ppc_swiotlb_enable = 1;

	/*
	 * Since the guest memory is inaccessible to the host, devices always
	 * need to use the SWIOTLB buffer for DMA even if dma_capable() says
	 * otherwise.
	 */
	ppc_swiotlb_flags |= SWIOTLB_ANY | SWIOTLB_FORCE;

	/* Share the SWIOTLB buffer with the host. */
	swiotlb_update_mem_attributes();

	return 0;
}
machine_early_initcall(pseries, init_svm);
39 | |
40 | int set_memory_encrypted(unsigned long addr, int numpages) |
41 | { |
42 | if (!cc_platform_has(attr: CC_ATTR_MEM_ENCRYPT)) |
43 | return 0; |
44 | |
45 | if (!PAGE_ALIGNED(addr)) |
46 | return -EINVAL; |
47 | |
48 | uv_unshare_page(PHYS_PFN(__pa(addr)), numpages); |
49 | |
50 | return 0; |
51 | } |
52 | |
53 | int set_memory_decrypted(unsigned long addr, int numpages) |
54 | { |
55 | if (!cc_platform_has(attr: CC_ATTR_MEM_ENCRYPT)) |
56 | return 0; |
57 | |
58 | if (!PAGE_ALIGNED(addr)) |
59 | return -EINVAL; |
60 | |
61 | uv_share_page(PHYS_PFN(__pa(addr)), numpages); |
62 | |
63 | return 0; |
64 | } |
65 | |
/*
 * There's one dispatch log per CPU; NR_DTL_PAGE is the worst-case number
 * of pages all per-CPU dispatch logs can occupy.
 */
#define NR_DTL_PAGE (DISPATCH_LOG_BYTES * CONFIG_NR_CPUS / PAGE_SIZE)

/* Pages already shared with the ultravisor for dispatch-log use. */
static struct page *dtl_page_store[NR_DTL_PAGE];
/* Number of valid entries in dtl_page_store. */
static long dtl_nr_pages;
71 | |
72 | static bool is_dtl_page_shared(struct page *page) |
73 | { |
74 | long i; |
75 | |
76 | for (i = 0; i < dtl_nr_pages; i++) |
77 | if (dtl_page_store[i] == page) |
78 | return true; |
79 | |
80 | return false; |
81 | } |
82 | |
83 | void dtl_cache_ctor(void *addr) |
84 | { |
85 | unsigned long pfn = PHYS_PFN(__pa(addr)); |
86 | struct page *page = pfn_to_page(pfn); |
87 | |
88 | if (!is_dtl_page_shared(page)) { |
89 | dtl_page_store[dtl_nr_pages] = page; |
90 | dtl_nr_pages++; |
91 | WARN_ON(dtl_nr_pages >= NR_DTL_PAGE); |
92 | uv_share_page(pfn, 1); |
93 | } |
94 | } |
95 |