// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#define DISABLE_BRANCH_PROFILING

/*
 * Since we're dealing with identity mappings, physical and virtual
 * addresses are the same, so override these defines which are ultimately
 * used by the headers in misc.h.
 */
#define __pa(x)	((unsigned long)(x))
#define __va(x)	((void *)((unsigned long)(x)))

/*
 * Special hack: we have to be careful, because no indirections are
 * allowed here, and paravirt_ops is a kind of one. As it will only run
 * on bare metal anyway, we just keep it from happening. (This list needs
 * to be extended when new paravirt and debugging variants are added.)
 */
#undef CONFIG_PARAVIRT
#undef CONFIG_PARAVIRT_XXL
#undef CONFIG_PARAVIRT_SPINLOCKS

/*
 * This code runs before CPU feature bits are set. By default, the
 * pgtable_l5_enabled() function uses bit X86_FEATURE_LA57 to determine if
 * 5-level paging is active, so that won't work here. USE_EARLY_PGTABLE_L5
 * is provided to handle this situation and, instead, use a variable that
 * has been set by the early boot code.
 */
#define USE_EARLY_PGTABLE_L5

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mem_encrypt.h>
#include <linux/cc_platform.h>

#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cmdline.h>
#include <asm/coco.h>
#include <asm/sev.h>

#include "mm_internal.h"

#define PGD_FLAGS		_KERNPG_TABLE_NOENC
#define P4D_FLAGS		_KERNPG_TABLE_NOENC
#define PUD_FLAGS		_KERNPG_TABLE_NOENC
#define PMD_FLAGS		_KERNPG_TABLE_NOENC

#define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)

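/*
 * The _DEC variants map memory as decrypted (no _PAGE_ENC).  The _DEC_WP
 * variants additionally clear the cache-type bits and set PAT + PWT so the
 * decrypted mapping is write-protected and kept out of the cache; see the
 * comment in sme_encrypt_kernel().  The _ENC variants set _PAGE_ENC to map
 * memory as encrypted.
 */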
#define PMD_FLAGS_DEC		PMD_FLAGS_LARGE
#define PMD_FLAGS_DEC_WP	((PMD_FLAGS_DEC & ~_PAGE_LARGE_CACHE_MASK) | \
				 (_PAGE_PAT_LARGE | _PAGE_PWT))

#define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)

#define PTE_FLAGS		(__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)

#define PTE_FLAGS_DEC		PTE_FLAGS
#define PTE_FLAGS_DEC_WP	((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
				 (_PAGE_PAT | _PAGE_PWT))

#define PTE_FLAGS_ENC		(PTE_FLAGS | _PAGE_ENC)

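/*
 * State shared by the pagetable population helpers below:
 *   @pgtable_area: next free memory in the workarea used for new
 *                  pagetable pages
 *   @pgd:          top-level pagetable being populated
 *   @pmd_flags:    attributes for new 2MB PMD (leaf) entries
 *   @pte_flags:    attributes for new 4KB PTE entries
 *   @paddr:        physical address currently being mapped
 *   @vaddr:        current virtual address of the mapping
 *   @vaddr_end:    end of the virtual address range to map
 */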
struct sme_populate_pgd_data {
	void	*pgtable_area;
	pgd_t	*pgd;

	pmdval_t pmd_flags;
	pteval_t pte_flags;
	unsigned long paddr;

	unsigned long vaddr;
	unsigned long vaddr_end;
};

/*
 * This work area lives in the .init.scratch section, which lives outside of
 * the kernel proper. It is sized to hold the intermediate copy buffer and
 * more than enough pagetable pages.
 *
 * By using this section, the kernel can be encrypted in place and it
 * avoids any possibility of boot parameters or initramfs images being
 * placed such that the in-place encryption logic overwrites them. This
 * section is 2MB aligned to allow for simple pagetable setup using only
 * PMD entries (see vmlinux.lds.S).
 */
static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch");

static char sme_cmdline_arg[] __initdata = "mem_encrypt";
static char sme_cmdline_on[] __initdata = "on";
static char sme_cmdline_off[] __initdata = "off";

static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
{
	unsigned long pgd_start, pgd_end, pgd_size;
	pgd_t *pgd_p;

	pgd_start = ppd->vaddr & PGDIR_MASK;
	pgd_end = ppd->vaddr_end & PGDIR_MASK;

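	/*
	 * Number of PGD entries spanned by [vaddr, vaddr_end], including the
	 * entry that contains vaddr_end, converted to a size in bytes.
	 */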
	pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);

	pgd_p = ppd->pgd + pgd_index(ppd->vaddr);

	memset(pgd_p, 0, pgd_size);
}

static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = ppd->pgd + pgd_index(ppd->vaddr);
	if (pgd_none(*pgd)) {
		p4d = ppd->pgtable_area;
		memset(p4d, 0, sizeof(*p4d) * PTRS_PER_P4D);
		ppd->pgtable_area += sizeof(*p4d) * PTRS_PER_P4D;
		set_pgd(pgd, __pgd(PGD_FLAGS | __pa(p4d)));
	}

	p4d = p4d_offset(pgd, ppd->vaddr);
	if (p4d_none(*p4d)) {
		pud = ppd->pgtable_area;
		memset(pud, 0, sizeof(*pud) * PTRS_PER_PUD);
		ppd->pgtable_area += sizeof(*pud) * PTRS_PER_PUD;
		set_p4d(p4d, __p4d(P4D_FLAGS | __pa(pud)));
	}

	pud = pud_offset(p4d, ppd->vaddr);
	if (pud_none(*pud)) {
		pmd = ppd->pgtable_area;
		memset(pmd, 0, sizeof(*pmd) * PTRS_PER_PMD);
		ppd->pgtable_area += sizeof(*pmd) * PTRS_PER_PMD;
		set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
	}

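	/*
	 * If the PUD is already a large (1GB) leaf mapping there is no PMD
	 * level to descend into; return NULL so the callers skip this
	 * address.
	 */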
	if (pud_large(*pud))
		return NULL;

	return pud;
}

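/*
 * sme_populate_pgd_large() installs a 2MB PMD leaf entry for the current
 * vaddr/paddr, while sme_populate_pgd() builds out a PTE page if needed and
 * installs a 4KB PTE entry.
 */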
static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = sme_prepare_pgd(ppd);
	if (!pud)
		return;

	pmd = pmd_offset(pud, ppd->vaddr);
	if (pmd_large(*pmd))
		return;

	set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
}

static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = sme_prepare_pgd(ppd);
	if (!pud)
		return;

	pmd = pmd_offset(pud, ppd->vaddr);
	if (pmd_none(*pmd)) {
		pte = ppd->pgtable_area;
		memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
		ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
		set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
	}

	if (pmd_large(*pmd))
		return;

	pte = pte_offset_kernel(pmd, ppd->vaddr);
	if (pte_none(*pte))
		set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
}

static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
{
	while (ppd->vaddr < ppd->vaddr_end) {
		sme_populate_pgd_large(ppd);

		ppd->vaddr += PMD_SIZE;
		ppd->paddr += PMD_SIZE;
	}
}

static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
{
	while (ppd->vaddr < ppd->vaddr_end) {
		sme_populate_pgd(ppd);

		ppd->vaddr += PAGE_SIZE;
		ppd->paddr += PAGE_SIZE;
	}
}

static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
				   pmdval_t pmd_flags, pteval_t pte_flags)
{
	unsigned long vaddr_end;

	ppd->pmd_flags = pmd_flags;
	ppd->pte_flags = pte_flags;

	/* Save original end value since we modify the struct value */
	vaddr_end = ppd->vaddr_end;

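	/*
	 * The range is mapped in three pieces: 4KB PTEs up to the first 2MB
	 * boundary, 2MB PMDs for the aligned middle, then 4KB PTEs for any
	 * unaligned tail.  E.g. 0x1ff000 - 0x601000 becomes one PTE, two
	 * PMDs and one more PTE.
	 */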
	/* If start is not 2MB aligned, create PTE entries */
	ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_SIZE);
	__sme_map_range_pte(ppd);

	/* Create PMD entries */
	ppd->vaddr_end = vaddr_end & PMD_MASK;
	__sme_map_range_pmd(ppd);

	/* If end is not 2MB aligned, create PTE entries */
	ppd->vaddr_end = vaddr_end;
	__sme_map_range_pte(ppd);
}

static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
}

static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
}

static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
}

static unsigned long __init sme_pgtable_calc(unsigned long len)
{
	unsigned long entries = 0, tables = 0;

	/*
	 * Perform a relatively simplistic calculation of the pagetable
	 * entries that are needed. Those mappings will be covered mostly
	 * by 2MB PMD entries so we can conservatively calculate the required
	 * number of P4D, PUD and PMD structures needed to perform the
	 * mappings. For mappings that are not 2MB aligned, PTE mappings
	 * would be needed for the start and end portion of the address range
	 * that fall outside of the 2MB alignment. This results in, at most,
	 * two extra pages to hold PTE entries for each range that is mapped.
	 * Incrementing the count for each covers the case where the addresses
	 * cross entries.
	 */

	/* PGDIR_SIZE is equal to P4D_SIZE on a 4-level machine. */
	if (PTRS_PER_P4D > 1)
		entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * sizeof(p4d_t) * PTRS_PER_P4D;
	entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * sizeof(pud_t) * PTRS_PER_PUD;
	entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD;
	entries += 2 * sizeof(pte_t) * PTRS_PER_PTE;

	/*
	 * Now calculate the added pagetable structures needed to populate
	 * the new pagetables.
	 */

	if (PTRS_PER_P4D > 1)
		tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * sizeof(p4d_t) * PTRS_PER_P4D;
	tables += DIV_ROUND_UP(entries, P4D_SIZE) * sizeof(pud_t) * PTRS_PER_PUD;
	tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD;

	return entries + tables;
}

void __init sme_encrypt_kernel(struct boot_params *bp)
{
	unsigned long workarea_start, workarea_end, workarea_len;
	unsigned long execute_start, execute_end, execute_len;
	unsigned long kernel_start, kernel_end, kernel_len;
	unsigned long initrd_start, initrd_end, initrd_len;
	struct sme_populate_pgd_data ppd;
	unsigned long pgtable_area_len;
	unsigned long decrypted_base;

	/*
	 * This is early code, use an open coded check for SME instead of
	 * using cc_platform_has(). This eliminates worries about removing
	 * instrumentation or checking boot_cpu_data in the cc_platform_has()
	 * function.
	 */
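	/*
	 * Under SEV the guest's memory is already encrypted, so in-place
	 * encryption is only needed when running with bare-metal SME.
	 */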
	if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED)
		return;

	/*
	 * Prepare for encrypting the kernel and initrd by building new
	 * pagetables with the necessary attributes needed to encrypt the
	 * kernel in place.
	 *
	 * One range of virtual addresses will map the memory occupied
	 * by the kernel and initrd as encrypted.
	 *
	 * Another range of virtual addresses will map the memory occupied
	 * by the kernel and initrd as decrypted and write-protected.
	 *
	 * The use of the write-protect attribute will prevent any of the
	 * memory from being cached.
	 */

	/* Physical addresses give us the identity mapped virtual addresses */
	kernel_start = __pa_symbol(_text);
	kernel_end = ALIGN(__pa_symbol(_end), PMD_SIZE);
	kernel_len = kernel_end - kernel_start;

	initrd_start = 0;
	initrd_end = 0;
	initrd_len = 0;
#ifdef CONFIG_BLK_DEV_INITRD
	initrd_len = (unsigned long)bp->hdr.ramdisk_size |
		     ((unsigned long)bp->ext_ramdisk_size << 32);
	if (initrd_len) {
		initrd_start = (unsigned long)bp->hdr.ramdisk_image |
			       ((unsigned long)bp->ext_ramdisk_image << 32);
		initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
		initrd_len = initrd_end - initrd_start;
	}
#endif

	/*
	 * We're running identity mapped, so we must obtain the address of the
	 * SME encryption workarea using rip-relative addressing.
	 */
	asm ("lea sme_workarea(%%rip), %0"
	     : "=r" (workarea_start)
	     : "p" (sme_workarea));

	/*
	 * Calculate required number of workarea bytes needed:
	 *   executable encryption area size:
	 *     stack page (PAGE_SIZE)
	 *     encryption routine page (PAGE_SIZE)
	 *     intermediate copy buffer (PMD_SIZE)
	 *   pagetable structures for the encryption of the kernel
	 *   pagetable structures for workarea (in case not currently mapped)
	 */
	execute_start = workarea_start;
	execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE;
	execute_len = execute_end - execute_start;

	/*
	 * One PGD for both encrypted and decrypted mappings and a set of
	 * PUDs and PMDs for each of the encrypted and decrypted mappings.
	 */
	pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
	pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
	if (initrd_len)
		pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;

	/* PUDs and PMDs needed in the current pagetables for the workarea */
	pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);

	/*
	 * The total workarea includes the executable encryption area and
	 * the pagetable area. The start of the workarea is already 2MB
	 * aligned, align the end of the workarea on a 2MB boundary so that
	 * we don't try to create/allocate PTE entries from the workarea
	 * before it is mapped.
	 */
	workarea_len = execute_len + pgtable_area_len;
	workarea_end = ALIGN(workarea_start + workarea_len, PMD_SIZE);

	/*
	 * Set the address to the start of where newly created pagetable
	 * structures (PGDs, PUDs and PMDs) will be allocated. New pagetable
	 * structures are created when the workarea is added to the current
	 * pagetables and when the new encrypted and decrypted kernel
	 * mappings are populated.
	 */
	ppd.pgtable_area = (void *)execute_end;

	/*
	 * Make sure the current pagetable structure has entries for
	 * addressing the workarea.
	 */
	ppd.pgd = (pgd_t *)native_read_cr3_pa();
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());

	/*
	 * A new pagetable structure is being built to allow for the kernel
	 * and initrd to be encrypted. It starts with an empty PGD that will
	 * then be populated with new PUDs and PMDs as the encrypted and
	 * decrypted kernel mappings are created.
	 */
	ppd.pgd = ppd.pgtable_area;
	memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
	ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;

	/*
	 * A different PGD index/entry must be used to get different
	 * pagetable entries for the decrypted mapping. Choose the next
	 * PGD index and convert it to a virtual address to be used as
	 * the base of the mapping.
	 */
	decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
	if (initrd_len) {
		unsigned long check_base;

		check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
		decrypted_base = max(decrypted_base, check_base);
	}
	decrypted_base <<= PGDIR_SHIFT;
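	/*
	 * decrypted_base is now the virtual offset of the chosen PGD slot;
	 * adding it to an identity-mapped address yields the decrypted,
	 * write-protected alias mapped below.
	 */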

	/* Add encrypted kernel (identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start;
	ppd.vaddr_end = kernel_end;
	sme_map_range_encrypted(&ppd);

	/* Add decrypted, write-protected kernel (non-identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_map_range_decrypted_wp(&ppd);

	if (initrd_len) {
		/* Add encrypted initrd (identity) mappings */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start;
		ppd.vaddr_end = initrd_end;
		sme_map_range_encrypted(&ppd);
		/*
		 * Add decrypted, write-protected initrd (non-identity) mappings
		 */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_map_range_decrypted_wp(&ppd);
	}

	/* Add decrypted workarea mappings to both kernel mappings */
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_map_range_decrypted(&ppd);

	/* Perform the encryption */
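	/*
	 * sme_encrypt_execute() is passed the encrypted (identity) kernel
	 * virtual address, the decrypted write-protected alias, the length
	 * to encrypt, the workarea address and the temporary PGD built
	 * above.
	 */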
	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
			    kernel_len, workarea_start, (unsigned long)ppd.pgd);

	if (initrd_len)
		sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
				    initrd_len, workarea_start,
				    (unsigned long)ppd.pgd);

	/*
	 * At this point we are running encrypted. Remove the mappings for
	 * the decrypted areas - all that is needed for this is to remove
	 * the PGD entry/entries.
	 */
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_clear_pgd(&ppd);

	if (initrd_len) {
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_clear_pgd(&ppd);
	}

	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_clear_pgd(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());
}

void __init sme_enable(struct boot_params *bp)
{
	const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
	unsigned int eax, ebx, ecx, edx;
	unsigned long feature_mask;
	bool active_by_default;
	unsigned long me_mask;
	char buffer[16];
	bool snp;
	u64 msr;

	snp = snp_init(bp);

	/* Check for the SME/SEV support leaf */
	eax = 0x80000000;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (eax < 0x8000001f)
		return;

#define AMD_SME_BIT	BIT(0)
#define AMD_SEV_BIT	BIT(1)

	/*
	 * Check for the SME/SEV feature:
	 *   CPUID Fn8000_001F[EAX]
	 *   - Bit 0 - Secure Memory Encryption support
	 *   - Bit 1 - Secure Encrypted Virtualization support
	 *   CPUID Fn8000_001F[EBX]
	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
	 */
	eax = 0x8000001f;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	/* Check whether SEV or SME is supported */
	if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
		return;

	me_mask = 1UL << (ebx & 0x3f);

	/* Check the SEV MSR to see whether SEV or SME is enabled */
	sev_status = __rdmsr(MSR_AMD64_SEV);
	feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;

	/* The SEV-SNP CC blob should never be present unless SEV-SNP is enabled. */
	if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
		snp_abort();

	/* Check if memory encryption is enabled */
	if (feature_mask == AMD_SME_BIT) {
		/*
		 * No SME if Hypervisor bit is set. This check is here to
		 * prevent a guest from trying to enable SME. For running as a
		 * KVM guest the MSR_AMD64_SYSCFG will be sufficient, but there
		 * might be other hypervisors which emulate that MSR as non-zero
		 * or even pass it through to the guest.
		 * A malicious hypervisor can still trick a guest into this
		 * path, but there is no way to protect against that.
		 */
		eax = 1;
		ecx = 0;
		native_cpuid(&eax, &ebx, &ecx, &edx);
		if (ecx & BIT(31))
			return;

		/* For SME, check the SYSCFG MSR */
		msr = __rdmsr(MSR_AMD64_SYSCFG);
		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
			return;
	} else {
		/* SEV state cannot be controlled by a command line option */
		sme_me_mask = me_mask;
		goto out;
	}

	/*
	 * Fixups have not been applied to phys_base yet and we're running
	 * identity mapped, so we must obtain the address of the SME command
	 * line argument data using rip-relative addressing.
	 */
	asm ("lea sme_cmdline_arg(%%rip), %0"
	     : "=r" (cmdline_arg)
	     : "p" (sme_cmdline_arg));
	asm ("lea sme_cmdline_on(%%rip), %0"
	     : "=r" (cmdline_on)
	     : "p" (sme_cmdline_on));
	asm ("lea sme_cmdline_off(%%rip), %0"
	     : "=r" (cmdline_off)
	     : "p" (sme_cmdline_off));

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
		active_by_default = true;
	else
		active_by_default = false;

	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
				     ((u64)bp->ext_cmd_line_ptr << 32));

	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
		return;

	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
		sme_me_mask = me_mask;
	else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
		sme_me_mask = 0;
	else
		sme_me_mask = active_by_default ? me_mask : 0;
out:
	if (sme_me_mask) {
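		/*
		 * Remove the encryption bit from the physical address mask so
		 * that it is treated as a page attribute rather than as part
		 * of the physical address, then register the mask with the
		 * confidential-computing core.
		 */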
		physical_mask &= ~sme_me_mask;
		cc_vendor = CC_VENDOR_AMD;
		cc_set_mask(sme_me_mask);
	}
}