// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

/*
 * uv_info contains both host and guest information, but it is currently
 * only expected to be used by modules, namely the KVM module and any
 * PV guest module.
 *
 * The kernel itself will write these values once in uv_query_info()
 * and then make some of them readable via a sysfs interface.
 */
struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info);

#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);

static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (u64)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}
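
/*
 * Illustrative sketch: every Ultravisor call follows the pattern used in
 * uv_init() above - a control block whose header carries the command code
 * and total length is handed to uv_call(), and header.rc/header.rrc are
 * inspected on failure. A hypothetical generic wrapper (not used anywhere
 * in this file) could look like this:
 */
#if 0	/* example only */
static int uv_call_checked(struct uv_cb_header *uvcb, const char *what)
{
	/* nonzero condition code means the UVC did not complete successfully */
	if (uv_call(0, (u64)uvcb)) {
		pr_err("%s failed with rc: 0x%x rrc: 0x%x\n",
		       what, uvcb->rc, uvcb->rrc);
		return -EINVAL;
	}
	return 0;
}
#endif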

void __init setup_uv(void)
{
	void *uv_stor_base;

	if (!is_prot_virt_host())
		return;

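	/*
	 * memblock_alloc_try_nid(size, align, min_addr, max_addr, nid):
	 * the base storage donated to the Ultravisor is allocated with
	 * 1 MB alignment, at or above 2 GB, anywhere in accessible
	 * memory, on any node.
	 */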
	uv_stor_base = memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization\n");
	prot_virt_host = 0;
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(uv_pin_shared);
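
/*
 * Illustrative sketch: a caller that wants a shared page to stay shared
 * would pin it by its physical address. The helper name below is
 * hypothetical and not part of this file's API.
 */
#if 0	/* example only */
static int example_keep_shared(struct page *page)
{
	/* a nonzero return means the Ultravisor rejected the pin */
	return uv_pin_shared(page_to_phys(page));
}
#endif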

/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
static int uv_destroy_page(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb)) {
		/*
		 * Older firmware uses rc 0x107/rrc 0xd as an indication of a
		 * non-secure page. Let us emulate the newer variant (no-op).
		 */
		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
			return 0;
		return -EINVAL;
	}
	return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_destroy_owned_page(unsigned long paddr)
{
	struct page *page = phys_to_page(paddr);
	int rc;

	get_page(page);
	rc = uv_destroy_page(paddr);
	if (!rc)
		clear_bit(PG_arch_1, &page->flags);
	put_page(page);
	return rc;
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_convert_owned_from_secure(unsigned long paddr)
{
	struct page *page = phys_to_page(paddr);
	int rc;

	get_page(page);
	rc = uv_convert_from_secure(paddr);
	if (!rc)
		clear_bit(PG_arch_1, &page->flags);
	put_page(page);
	return rc;
}

/*
 * Calculate the expected ref_count for a page that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know, for example,
 * that a secure page cannot be a huge page.
 */
static int expected_page_refs(struct page *page)
{
	int res;

	res = page_mapcount(page);
	if (PageSwapCache(page)) {
		res++;
	} else if (page_mapping(page)) {
		res++;
		if (page_has_private(page))
			res++;
	}
	return res;
}
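
/*
 * Worked example for the accounting above: an anonymous page mapped into
 * one process and sitting in the swap cache is expected to have a
 * refcount of 2 (one for the mapping, one for the swap cache) when
 * nothing else holds a pin.
 */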

static int make_page_secure(struct page *page, struct uv_cb_header *uvcb)
{
	int expected, cc = 0;

	if (PageWriteback(page))
		return -EAGAIN;
	expected = expected_page_refs(page);
	if (!page_ref_freeze(page, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &page->flags);
	/*
	 * If the UVC does not succeed or fail immediately, we don't want to
	 * loop for long, or we might get stall notifications.
	 * On the other hand, this is a complex scenario and we are holding a lot of
	 * locks, so we can't easily sleep and reschedule. We try only once,
	 * and if the UVC returned busy or partial completion, we return
	 * -EAGAIN and we let the callers deal with it.
	 */
	cc = __uv_call(0, (u64)uvcb);
	page_ref_unfreeze(page, expected);
	/*
	 * Return -ENXIO if the page was not mapped, -EINVAL for other errors.
	 * If busy or partially completed, return -EAGAIN.
	 */
	if (cc == UVC_CC_OK)
		return 0;
	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
		return -EAGAIN;
	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}

/**
 * should_export_before_import - Determine whether an export is needed
 *				 before an import-like operation
 * @uvcb: the Ultravisor control block of the UVC to be performed
 * @mm: the mm of the process
 *
 * Returns whether an export is needed before every import-like operation.
 * This is needed for shared pages, which don't trigger a secure storage
 * exception when accessed from a different guest.
 *
 * Although considered as one, the Unpin Page UVC is not an actual import,
 * so it is not affected.
 *
 * No export is needed also when there is only one protected VM, because the
 * page cannot belong to the wrong VM in that case (there is no "other VM"
 * it can belong to).
 *
 * Return: true if an export is needed before every import, otherwise false.
 */
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
{
	/*
	 * The misc feature indicates, among other things, that importing a
	 * shared page from a different protected VM will automatically also
	 * transfer its ownership.
	 */
	if (uv_has_feature(BIT_UV_FEAT_MISC))
		return false;
	if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
		return false;
	return atomic_read(&mm->context.protected_count) > 1;
}

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it's brought in the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool local_drain = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct page *page;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge, and userspace should not combine both.
	 * In case userspace does it anyway, this will result in an -EFAULT
	 * for the unpack. The guest thus never reaches secure mode. If
	 * userspace plays dirty tricks by mapping huge pages later on, this
	 * will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	if (!ptep)
		goto out;
	if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
		page = pte_page(*ptep);
		rc = -EAGAIN;
		if (trylock_page(page)) {
			if (should_export_before_import(uvcb, gmap->mm))
				uv_convert_from_secure(page_to_phys(page));
			rc = make_page_secure(page, uvcb);
			unlock_page(page);
		}
	}
	pte_unmap_unlock(ptep, ptelock);
out:
	mmap_read_unlock(gmap->mm);

	if (rc == -EAGAIN) {
		/*
		 * If we are here because the UVC returned busy or partial
		 * completion, this is just a useless check, but it is safe.
		 */
		wait_on_page_writeback(page);
	} else if (rc == -EBUSY) {
		/*
		 * If we have tried a local drain and the page refcount
		 * still does not match our expected safe value, try with a
		 * system wide drain. This is needed if the pagevecs holding
		 * the page are on a different CPU.
		 */
		if (local_drain) {
			lru_add_drain_all();
			/* We give up here, and let the caller try again */
			return -EAGAIN;
		}
		/*
		 * We are here if the page refcount does not match the
		 * expected safe value. The main culprits are usually
		 * pagevecs. With lru_add_drain() we drain the pagevecs
		 * on the local CPU so that hopefully the refcount will
		 * reach the expected safe value.
		 */
		lru_add_drain();
		local_drain = true;
		/* And now we try again immediately after draining */
		goto again;
	} else if (rc == -ENXIO) {
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);
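
/*
 * Illustrative sketch: callers of gmap_make_secure() are expected to
 * handle -EAGAIN by retrying, as the comments above describe. The
 * simplified, hypothetical caller below assumes it holds no locks that
 * would make sleeping unsafe.
 */
#if 0	/* example only */
static int example_make_secure_retry(struct gmap *gmap, unsigned long gaddr,
				     void *uvcb)
{
	int rc;

	do {
		rc = gmap_make_secure(gmap, gaddr, uvcb);
		if (rc == -EAGAIN)
			cond_resched();	/* avoid busy-looping on the UVC */
	} while (rc == -EAGAIN);
	return rc;
}
#endif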

int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
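
/*
 * Illustrative sketch: a hypothetical fault handler for a secure-storage
 * access would convert the faulting guest page and let the guest retry
 * the instruction afterwards. This is not how any particular intercept
 * handler is actually written; it only shows the intended calling
 * convention.
 */
#if 0	/* example only */
static int example_handle_secure_storage_fault(struct gmap *gmap,
					       unsigned long gaddr)
{
	int rc = gmap_convert_to_secure(gmap, gaddr);

	/* -EAGAIN means "try again later": report success and re-fault */
	return rc == -EAGAIN ? 0 : rc;
}
#endif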

/**
 * gmap_destroy_page - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 */
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
	struct vm_area_struct *vma;
	unsigned long uaddr;
	struct page *page;
	int rc;

	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Huge pages should not be able to become secure
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = 0;
	/* we take an extra reference here */
	page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	rc = uv_destroy_owned_page(page_to_phys(page));
	/*
	 * Fault handlers can race; it is possible that two CPUs will fault
	 * on the same secure page. One CPU can destroy the page, reboot,
	 * re-enter secure mode and import it, while the second CPU was
	 * stuck at the beginning of the handler. At some point the second
	 * CPU will be able to progress, and it will not be able to destroy
	 * the page. In that case we do not want to terminate the process,
	 * we instead try to export the page.
	 */
	if (rc)
		rc = uv_convert_owned_from_secure(page_to_phys(page));
	put_page(page);
out:
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_destroy_page);

/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Having two
 * parallel calls to make_page_accessible is fine, as the UV calls will
 * become a no-op if the page is already exported.
 */
int arch_make_page_accessible(struct page *page)
{
	int rc = 0;

	/* Hugepage cannot be protected, so nothing to do */
	if (PageHuge(page))
		return 0;

	/*
	 * PG_arch_1 is used in 3 places:
	 * 1. for kernel page tables during early boot
	 * 2. for storage keys of huge pages and KVM
	 * 3. As an indication that this page might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never huge, all 3 variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &page->flags))
		return 0;

	rc = uv_pin_shared(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	rc = uv_convert_from_secure(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);
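
/*
 * Note: common code invokes this hook before host I/O touches page
 * content (e.g. on the get_user_pages() and writeback paths), so the
 * host never reads encrypted secure-page content by accident.
 */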

#endif

#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n%lx\n%lx\n%lx\n",
			  uv_info.inst_calls_list[0],
			  uv_info.inst_calls_list[1],
			  uv_info.inst_calls_list[2],
			  uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
	__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);

static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
	__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);

static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.guest_cpu_stor_len);
}

static struct kobj_attribute uv_query_dump_cpu_len_attr =
	__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);

static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
					       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
	__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);

static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
	__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
					     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
	__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);

static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
	__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);

static ssize_t uv_query_supp_add_secret_req_ver(struct kobject *kobj,
						struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_req_ver);
}

static struct kobj_attribute uv_query_supp_add_secret_req_ver_attr =
	__ATTR(supp_add_secret_req_ver, 0444, uv_query_supp_add_secret_req_ver, NULL);

static ssize_t uv_query_supp_add_secret_pcf(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_pcf);
}

static struct kobj_attribute uv_query_supp_add_secret_pcf_attr =
	__ATTR(supp_add_secret_pcf, 0444, uv_query_supp_add_secret_pcf, NULL);

static ssize_t uv_query_supp_secret_types(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_secret_types);
}

static struct kobj_attribute uv_query_supp_secret_types_attr =
	__ATTR(supp_secret_types, 0444, uv_query_supp_secret_types, NULL);

static ssize_t uv_query_max_secrets(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_secrets);
}

static struct kobj_attribute uv_query_max_secrets_attr =
	__ATTR(max_secrets, 0444, uv_query_max_secrets, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_feature_indications_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	&uv_query_supp_se_hdr_ver_attr.attr,
	&uv_query_supp_se_hdr_pcf_attr.attr,
	&uv_query_dump_storage_state_len_attr.attr,
	&uv_query_dump_finalize_len_attr.attr,
	&uv_query_dump_cpu_len_attr.attr,
	&uv_query_supp_att_req_hdr_ver_attr.attr,
	&uv_query_supp_att_pflags_attr.attr,
	&uv_query_supp_add_secret_req_ver_attr.attr,
	&uv_query_supp_add_secret_pcf_attr.attr,
	&uv_query_supp_secret_types_attr.attr,
	&uv_query_max_secrets_attr.attr,
	NULL,
};

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	int val = 0;

#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
	val = prot_virt_guest;
#endif
	return sysfs_emit(buf, "%d\n", val);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	int val = 0;

#if IS_ENABLED(CONFIG_KVM)
	val = prot_virt_host;
#endif

	return sysfs_emit(buf, "%d\n", val);
}

static struct kobj_attribute uv_prot_virt_guest =
	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
	&uv_prot_virt_guest.attr,
	&uv_prot_virt_host.attr,
	NULL,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;

static int __init uv_info_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
	if (rc)
		goto out_kobj;

	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
	if (!uv_query_kset) {
		rc = -ENOMEM;
		goto out_ind_files;
	}

	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
	if (!rc)
		return 0;

	kset_unregister(uv_query_kset);
out_ind_files:
	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
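
/*
 * With firmware_kobj as the parent, uv_info_init() results in a sysfs
 * layout like:
 *   /sys/firmware/uv/prot_virt_guest
 *   /sys/firmware/uv/prot_virt_host
 *   /sys/firmware/uv/query/facilities
 *   /sys/firmware/uv/query/max_cpus
 *   ... (one read-only file per attribute in uv_query_attrs)
 */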
device_initcall(uv_info_init);
#endif