Diffstat (limited to 'arch/s390/kernel/uv.c')
-rw-r--r-- | arch/s390/kernel/uv.c | 682 |
1 file changed, 437 insertions, 245 deletions
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c index fc07bc39e698..9a5d5be8acf4 100644 --- a/arch/s390/kernel/uv.c +++ b/arch/s390/kernel/uv.c @@ -2,7 +2,7 @@ /* * Common Ultravisor functions and initialization * - * Copyright IBM Corp. 2019, 2020 + * Copyright IBM Corp. 2019, 2024 */ #define KMSG_COMPONENT "prot_virt" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt @@ -14,14 +14,14 @@ #include <linux/memblock.h> #include <linux/pagemap.h> #include <linux/swap.h> +#include <linux/pagewalk.h> #include <asm/facility.h> #include <asm/sections.h> #include <asm/uv.h> /* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */ -#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST int __bootdata_preserved(prot_virt_guest); -#endif +EXPORT_SYMBOL(prot_virt_guest); /* * uv_info contains both host and guest information but it's currently only @@ -34,7 +34,6 @@ int __bootdata_preserved(prot_virt_guest); struct uv_info __bootdata_preserved(uv_info); EXPORT_SYMBOL(uv_info); -#if IS_ENABLED(CONFIG_KVM) int __bootdata_preserved(prot_virt_host); EXPORT_SYMBOL(prot_virt_host); @@ -109,7 +108,7 @@ EXPORT_SYMBOL_GPL(uv_pin_shared); * * @paddr: Absolute host address of page to be destroyed */ -static int uv_destroy_page(unsigned long paddr) +static int uv_destroy(unsigned long paddr) { struct uv_cb_cfs uvcb = { .header.cmd = UVC_CMD_DESTR_SEC_STOR, @@ -130,20 +129,33 @@ static int uv_destroy_page(unsigned long paddr) } /* - * The caller must already hold a reference to the page + * The caller must already hold a reference to the folio */ -int uv_destroy_owned_page(unsigned long paddr) +int uv_destroy_folio(struct folio *folio) { - struct page *page = phys_to_page(paddr); int rc; - get_page(page); - rc = uv_destroy_page(paddr); + /* See gmap_make_secure(): large folios cannot be secure */ + if (unlikely(folio_test_large(folio))) + return 0; + + folio_get(folio); + rc = uv_destroy(folio_to_phys(folio)); if (!rc) - clear_bit(PG_arch_1, &page->flags); - put_page(page); + clear_bit(PG_arch_1, &folio->flags); + folio_put(folio); return rc; } +EXPORT_SYMBOL(uv_destroy_folio); + +/* + * The present PTE still indirectly holds a folio reference through the mapping. + */ +int uv_destroy_pte(pte_t pte) +{ + VM_WARN_ON(!pte_present(pte)); + return uv_destroy_folio(pfn_folio(pte_pfn(pte))); +} /* * Requests the Ultravisor to encrypt a guest page and make it @@ -163,73 +175,35 @@ int uv_convert_from_secure(unsigned long paddr) return -EINVAL; return 0; } +EXPORT_SYMBOL_GPL(uv_convert_from_secure); /* - * The caller must already hold a reference to the page + * The caller must already hold a reference to the folio. */ -int uv_convert_owned_from_secure(unsigned long paddr) +int uv_convert_from_secure_folio(struct folio *folio) { - struct page *page = phys_to_page(paddr); int rc; - get_page(page); - rc = uv_convert_from_secure(paddr); + /* See gmap_make_secure(): large folios cannot be secure */ + if (unlikely(folio_test_large(folio))) + return 0; + + folio_get(folio); + rc = uv_convert_from_secure(folio_to_phys(folio)); if (!rc) - clear_bit(PG_arch_1, &page->flags); - put_page(page); + clear_bit(PG_arch_1, &folio->flags); + folio_put(folio); return rc; } +EXPORT_SYMBOL_GPL(uv_convert_from_secure_folio); /* - * Calculate the expected ref_count for a page that would otherwise have no - * further pins. This was cribbed from similar functions in other places in - * the kernel, but with some slight modifications. We know that a secure - * page can not be a huge page for example. 
+ * The present PTE still indirectly holds a folio reference through the mapping. */ -static int expected_page_refs(struct page *page) +int uv_convert_from_secure_pte(pte_t pte) { - int res; - - res = page_mapcount(page); - if (PageSwapCache(page)) { - res++; - } else if (page_mapping(page)) { - res++; - if (page_has_private(page)) - res++; - } - return res; -} - -static int make_page_secure(struct page *page, struct uv_cb_header *uvcb) -{ - int expected, cc = 0; - - if (PageWriteback(page)) - return -EAGAIN; - expected = expected_page_refs(page); - if (!page_ref_freeze(page, expected)) - return -EBUSY; - set_bit(PG_arch_1, &page->flags); - /* - * If the UVC does not succeed or fail immediately, we don't want to - * loop for long, or we might get stall notifications. - * On the other hand, this is a complex scenario and we are holding a lot of - * locks, so we can't easily sleep and reschedule. We try only once, - * and if the UVC returned busy or partial completion, we return - * -EAGAIN and we let the callers deal with it. - */ - cc = __uv_call(0, (u64)uvcb); - page_ref_unfreeze(page, expected); - /* - * Return -ENXIO if the page was not mapped, -EINVAL for other errors. - * If busy or partially completed, return -EAGAIN. - */ - if (cc == UVC_CC_OK) - return 0; - else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL) - return -EAGAIN; - return uvcb->rc == 0x10a ? -ENXIO : -EINVAL; + VM_WARN_ON(!pte_present(pte)); + return uv_convert_from_secure_folio(pfn_folio(pte_pfn(pte))); } /** @@ -266,210 +240,208 @@ static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_str } /* - * Requests the Ultravisor to make a page accessible to a guest. - * If it's brought in the first time, it will be cleared. If - * it has been exported before, it will be decrypted and integrity - * checked. + * Calculate the expected ref_count for a folio that would otherwise have no + * further pins. This was cribbed from similar functions in other places in + * the kernel, but with some slight modifications. We know that a secure + * folio can not be a large folio, for example. */ -int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb) +static int expected_folio_refs(struct folio *folio) { - struct vm_area_struct *vma; - bool local_drain = false; - spinlock_t *ptelock; - unsigned long uaddr; - struct page *page; - pte_t *ptep; - int rc; + int res; -again: - rc = -EFAULT; - mmap_read_lock(gmap->mm); + res = folio_mapcount(folio); + if (folio_test_swapcache(folio)) { + res++; + } else if (folio_mapping(folio)) { + res++; + if (folio->private) + res++; + } + return res; +} - uaddr = __gmap_translate(gmap, gaddr); - if (IS_ERR_VALUE(uaddr)) - goto out; - vma = vma_lookup(gmap->mm, uaddr); - if (!vma) - goto out; +/** + * __make_folio_secure() - make a folio secure + * @folio: the folio to make secure + * @uvcb: the uvcb that describes the UVC to be used + * + * The folio @folio will be made secure if possible, @uvcb will be passed + * as-is to the UVC. + * + * Return: 0 on success; + * -EBUSY if the folio is in writeback or has too many references; + * -EAGAIN if the UVC needs to be attempted again; + * -ENXIO if the address is not mapped; + * -EINVAL if the UVC failed for other reasons. + * + * Context: The caller must hold exactly one extra reference on the folio + * (it's the same logic as split_folio()), and the folio must be + * locked. 
+ */ +static int __make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb) +{ + int expected, cc = 0; + + if (folio_test_writeback(folio)) + return -EBUSY; + expected = expected_folio_refs(folio) + 1; + if (!folio_ref_freeze(folio, expected)) + return -EBUSY; + set_bit(PG_arch_1, &folio->flags); /* - * Secure pages cannot be huge and userspace should not combine both. - * In case userspace does it anyway this will result in an -EFAULT for - * the unpack. The guest is thus never reaching secure mode. If - * userspace is playing dirty tricky with mapping huge pages later - * on this will result in a segmentation fault. + * If the UVC does not succeed or fail immediately, we don't want to + * loop for long, or we might get stall notifications. + * On the other hand, this is a complex scenario and we are holding a lot of + * locks, so we can't easily sleep and reschedule. We try only once, + * and if the UVC returned busy or partial completion, we return + * -EAGAIN and we let the callers deal with it. */ - if (is_vm_hugetlb_page(vma)) - goto out; - - rc = -ENXIO; - ptep = get_locked_pte(gmap->mm, uaddr, &ptelock); - if (!ptep) - goto out; - if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) { - page = pte_page(*ptep); - rc = -EAGAIN; - if (trylock_page(page)) { - if (should_export_before_import(uvcb, gmap->mm)) - uv_convert_from_secure(page_to_phys(page)); - rc = make_page_secure(page, uvcb); - unlock_page(page); - } - } - pte_unmap_unlock(ptep, ptelock); -out: - mmap_read_unlock(gmap->mm); - - if (rc == -EAGAIN) { - /* - * If we are here because the UVC returned busy or partial - * completion, this is just a useless check, but it is safe. - */ - wait_on_page_writeback(page); - } else if (rc == -EBUSY) { - /* - * If we have tried a local drain and the page refcount - * still does not match our expected safe value, try with a - * system wide drain. This is needed if the pagevecs holding - * the page are on a different CPU. - */ - if (local_drain) { - lru_add_drain_all(); - /* We give up here, and let the caller try again */ - return -EAGAIN; - } - /* - * We are here if the page refcount does not match the - * expected safe value. The main culprits are usually - * pagevecs. With lru_add_drain() we drain the pagevecs - * on the local CPU so that hopefully the refcount will - * reach the expected safe value. - */ - lru_add_drain(); - local_drain = true; - /* And now we try again immediately after draining */ - goto again; - } else if (rc == -ENXIO) { - if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE)) - return -EFAULT; + cc = __uv_call(0, (u64)uvcb); + folio_ref_unfreeze(folio, expected); + /* + * Return -ENXIO if the folio was not mapped, -EINVAL for other errors. + * If busy or partially completed, return -EAGAIN. + */ + if (cc == UVC_CC_OK) + return 0; + else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL) return -EAGAIN; - } - return rc; + return uvcb->rc == 0x10a ? 
-ENXIO : -EINVAL; } -EXPORT_SYMBOL_GPL(gmap_make_secure); -int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr) +static int make_folio_secure(struct mm_struct *mm, struct folio *folio, struct uv_cb_header *uvcb) { - struct uv_cb_cts uvcb = { - .header.cmd = UVC_CMD_CONV_TO_SEC_STOR, - .header.len = sizeof(uvcb), - .guest_handle = gmap->guest_handle, - .gaddr = gaddr, - }; + int rc; - return gmap_make_secure(gmap, gaddr, &uvcb); + if (!folio_trylock(folio)) + return -EAGAIN; + if (should_export_before_import(uvcb, mm)) + uv_convert_from_secure(folio_to_phys(folio)); + rc = __make_folio_secure(folio, uvcb); + folio_unlock(folio); + + return rc; } -EXPORT_SYMBOL_GPL(gmap_convert_to_secure); /** - * gmap_destroy_page - Destroy a guest page. - * @gmap: the gmap of the guest - * @gaddr: the guest address to destroy + * s390_wiggle_split_folio() - try to drain extra references to a folio and optionally split. + * @mm: the mm containing the folio to work on + * @folio: the folio + * @split: whether to split a large folio * - * An attempt will be made to destroy the given guest page. If the attempt - * fails, an attempt is made to export the page. If both attempts fail, an - * appropriate error is returned. + * Context: Must be called while holding an extra reference to the folio; + * the mm lock should not be held. + * Return: 0 if the folio was split successfully; + * -EAGAIN if the folio was not split successfully but another attempt + * can be made, or if @split was set to false; + * -EINVAL in case of other errors. See split_folio(). */ -int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr) +static int s390_wiggle_split_folio(struct mm_struct *mm, struct folio *folio, bool split) +{ + int rc; + + lockdep_assert_not_held(&mm->mmap_lock); + folio_wait_writeback(folio); + lru_add_drain_all(); + if (split) { + folio_lock(folio); + rc = split_folio(folio); + folio_unlock(folio); + + if (rc != -EBUSY) + return rc; + } + return -EAGAIN; +} + +int make_hva_secure(struct mm_struct *mm, unsigned long hva, struct uv_cb_header *uvcb) { struct vm_area_struct *vma; - unsigned long uaddr; - struct page *page; + struct folio_walk fw; + struct folio *folio; int rc; - rc = -EFAULT; - mmap_read_lock(gmap->mm); + mmap_read_lock(mm); + vma = vma_lookup(mm, hva); + if (!vma) { + mmap_read_unlock(mm); + return -EFAULT; + } + folio = folio_walk_start(&fw, vma, hva, 0); + if (!folio) { + mmap_read_unlock(mm); + return -ENXIO; + } - uaddr = __gmap_translate(gmap, gaddr); - if (IS_ERR_VALUE(uaddr)) - goto out; - vma = vma_lookup(gmap->mm, uaddr); - if (!vma) - goto out; + folio_get(folio); /* - * Huge pages should not be able to become secure - */ - if (is_vm_hugetlb_page(vma)) - goto out; - - rc = 0; - /* we take an extra reference here */ - page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET); - if (IS_ERR_OR_NULL(page)) - goto out; - rc = uv_destroy_owned_page(page_to_phys(page)); - /* - * Fault handlers can race; it is possible that two CPUs will fault - * on the same secure page. One CPU can destroy the page, reboot, - * re-enter secure mode and import it, while the second CPU was - * stuck at the beginning of the handler. At some point the second - * CPU will be able to progress, and it will not be able to destroy - * the page. In that case we do not want to terminate the process, - * we instead try to export the page. + * Secure pages cannot be huge and userspace should not combine both. + * In case userspace does it anyway this will result in an -EFAULT for + * the unpack. 
The guest is thus never reaching secure mode. + * If userspace plays dirty tricks and decides to map huge pages at a + * later point in time, it will receive a segmentation fault or + * KVM_RUN will return -EFAULT. */ - if (rc) - rc = uv_convert_owned_from_secure(page_to_phys(page)); - put_page(page); -out: - mmap_read_unlock(gmap->mm); + if (folio_test_hugetlb(folio)) + rc = -EFAULT; + else if (folio_test_large(folio)) + rc = -E2BIG; + else if (!pte_write(fw.pte) || (pte_val(fw.pte) & _PAGE_INVALID)) + rc = -ENXIO; + else + rc = make_folio_secure(mm, folio, uvcb); + folio_walk_end(&fw, vma); + mmap_read_unlock(mm); + + if (rc == -E2BIG || rc == -EBUSY) + rc = s390_wiggle_split_folio(mm, folio, rc == -E2BIG); + folio_put(folio); + return rc; } -EXPORT_SYMBOL_GPL(gmap_destroy_page); +EXPORT_SYMBOL_GPL(make_hva_secure); /* - * To be called with the page locked or with an extra reference! This will - * prevent gmap_make_secure from touching the page concurrently. Having 2 - * parallel make_page_accessible is fine, as the UV calls will become a - * no-op if the page is already exported. + * To be called with the folio locked or with an extra reference! This will + * prevent gmap_make_secure from touching the folio concurrently. Having 2 + * parallel arch_make_folio_accessible is fine, as the UV calls will become a + * no-op if the folio is already exported. */ -int arch_make_page_accessible(struct page *page) +int arch_make_folio_accessible(struct folio *folio) { int rc = 0; - /* Hugepage cannot be protected, so nothing to do */ - if (PageHuge(page)) + /* See gmap_make_secure(): large folios cannot be secure */ + if (unlikely(folio_test_large(folio))) return 0; /* - * PG_arch_1 is used in 3 places: - * 1. for kernel page tables during early boot - * 2. for storage keys of huge pages and KVM - * 3. As an indication that this page might be secure. This can + * PG_arch_1 is used in 2 places: + * 1. for storage keys of hugetlb folios and KVM + * 2. As an indication that this small folio might be secure. This can * overindicate, e.g. we set the bit before calling * convert_to_secure. - * As secure pages are never huge, all 3 variants can co-exists. + * As secure pages are never large folios, both variants can co-exists. 
*/ - if (!test_bit(PG_arch_1, &page->flags)) + if (!test_bit(PG_arch_1, &folio->flags)) return 0; - rc = uv_pin_shared(page_to_phys(page)); + rc = uv_pin_shared(folio_to_phys(folio)); if (!rc) { - clear_bit(PG_arch_1, &page->flags); + clear_bit(PG_arch_1, &folio->flags); return 0; } - rc = uv_convert_from_secure(page_to_phys(page)); + rc = uv_convert_from_secure(folio_to_phys(folio)); if (!rc) { - clear_bit(PG_arch_1, &page->flags); + clear_bit(PG_arch_1, &folio->flags); return 0; } return rc; } -EXPORT_SYMBOL_GPL(arch_make_page_accessible); - -#endif +EXPORT_SYMBOL_GPL(arch_make_folio_accessible); -#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM) static ssize_t uv_query_facilities(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -612,12 +584,32 @@ static struct kobj_attribute uv_query_supp_secret_types_attr = static ssize_t uv_query_max_secrets(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - return sysfs_emit(buf, "%d\n", uv_info.max_secrets); + return sysfs_emit(buf, "%d\n", + uv_info.max_assoc_secrets + uv_info.max_retr_secrets); } static struct kobj_attribute uv_query_max_secrets_attr = __ATTR(max_secrets, 0444, uv_query_max_secrets, NULL); +static ssize_t uv_query_max_retr_secrets(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%d\n", uv_info.max_retr_secrets); +} + +static struct kobj_attribute uv_query_max_retr_secrets_attr = + __ATTR(max_retr_secrets, 0444, uv_query_max_retr_secrets, NULL); + +static ssize_t uv_query_max_assoc_secrets(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%d\n", uv_info.max_assoc_secrets); +} + +static struct kobj_attribute uv_query_max_assoc_secrets_attr = + __ATTR(max_assoc_secrets, 0444, uv_query_max_assoc_secrets, NULL); + static struct attribute *uv_query_attrs[] = { &uv_query_facilities_attr.attr, &uv_query_feature_indications_attr.attr, @@ -635,34 +627,91 @@ static struct attribute *uv_query_attrs[] = { &uv_query_supp_add_secret_pcf_attr.attr, &uv_query_supp_secret_types_attr.attr, &uv_query_max_secrets_attr.attr, + &uv_query_max_assoc_secrets_attr.attr, + &uv_query_max_retr_secrets_attr.attr, NULL, }; +static inline struct uv_cb_query_keys uv_query_keys(void) +{ + struct uv_cb_query_keys uvcb = { + .header.cmd = UVC_CMD_QUERY_KEYS, + .header.len = sizeof(uvcb) + }; + + uv_call(0, (uint64_t)&uvcb); + return uvcb; +} + +static inline ssize_t emit_hash(struct uv_key_hash *hash, char *buf, int at) +{ + return sysfs_emit_at(buf, at, "%016llx%016llx%016llx%016llx\n", + hash->dword[0], hash->dword[1], hash->dword[2], hash->dword[3]); +} + +static ssize_t uv_keys_host_key(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct uv_cb_query_keys uvcb = uv_query_keys(); + + return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_HK], buf, 0); +} + +static struct kobj_attribute uv_keys_host_key_attr = + __ATTR(host_key, 0444, uv_keys_host_key, NULL); + +static ssize_t uv_keys_backup_host_key(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct uv_cb_query_keys uvcb = uv_query_keys(); + + return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_BACK_HK], buf, 0); +} + +static struct kobj_attribute uv_keys_backup_host_key_attr = + __ATTR(backup_host_key, 0444, uv_keys_backup_host_key, NULL); + +static ssize_t uv_keys_all(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct uv_cb_query_keys uvcb = uv_query_keys(); + ssize_t len = 0; + int i; + + for 
(i = 0; i < ARRAY_SIZE(uvcb.key_hashes); i++) + len += emit_hash(uvcb.key_hashes + i, buf, len); + + return len; +} + +static struct kobj_attribute uv_keys_all_attr = + __ATTR(all, 0444, uv_keys_all, NULL); + static struct attribute_group uv_query_attr_group = { .attrs = uv_query_attrs, }; +static struct attribute *uv_keys_attrs[] = { + &uv_keys_host_key_attr.attr, + &uv_keys_backup_host_key_attr.attr, + &uv_keys_all_attr.attr, + NULL, +}; + +static struct attribute_group uv_keys_attr_group = { + .attrs = uv_keys_attrs, +}; + static ssize_t uv_is_prot_virt_guest(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - int val = 0; - -#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST - val = prot_virt_guest; -#endif - return sysfs_emit(buf, "%d\n", val); + return sysfs_emit(buf, "%d\n", prot_virt_guest); } static ssize_t uv_is_prot_virt_host(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - int val = 0; - -#if IS_ENABLED(CONFIG_KVM) - val = prot_virt_host; -#endif - - return sysfs_emit(buf, "%d\n", val); + return sysfs_emit(buf, "%d\n", prot_virt_host); } static struct kobj_attribute uv_prot_virt_guest = @@ -678,9 +727,27 @@ static const struct attribute *uv_prot_virt_attrs[] = { }; static struct kset *uv_query_kset; +static struct kset *uv_keys_kset; static struct kobject *uv_kobj; -static int __init uv_info_init(void) +static int __init uv_sysfs_dir_init(const struct attribute_group *grp, + struct kset **uv_dir_kset, const char *name) +{ + struct kset *kset; + int rc; + + kset = kset_create_and_add(name, NULL, uv_kobj); + if (!kset) + return -ENOMEM; + *uv_dir_kset = kset; + + rc = sysfs_create_group(&kset->kobj, grp); + if (rc) + kset_unregister(kset); + return rc; +} + +static int __init uv_sysfs_init(void) { int rc = -ENOMEM; @@ -695,17 +762,16 @@ static int __init uv_info_init(void) if (rc) goto out_kobj; - uv_query_kset = kset_create_and_add("query", NULL, uv_kobj); - if (!uv_query_kset) { - rc = -ENOMEM; + rc = uv_sysfs_dir_init(&uv_query_attr_group, &uv_query_kset, "query"); + if (rc) goto out_ind_files; - } - rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group); - if (!rc) - return 0; + /* Get installed key hashes if available, ignore any errors */ + if (test_bit_inv(BIT_UVC_CMD_QUERY_KEYS, uv_info.inst_calls_list)) + uv_sysfs_dir_init(&uv_keys_attr_group, &uv_keys_kset, "keys"); + + return 0; - kset_unregister(uv_query_kset); out_ind_files: sysfs_remove_files(uv_kobj, uv_prot_virt_attrs); out_kobj: @@ -713,5 +779,131 @@ out_kobj: kobject_put(uv_kobj); return rc; } -device_initcall(uv_info_init); -#endif +device_initcall(uv_sysfs_init); + +/* + * Find the secret with the secret_id in the provided list. + * + * Context: might sleep. + */ +static int find_secret_in_page(const u8 secret_id[UV_SECRET_ID_LEN], + const struct uv_secret_list *list, + struct uv_secret_list_item_hdr *secret) +{ + u16 i; + + for (i = 0; i < list->total_num_secrets; i++) { + if (memcmp(secret_id, list->secrets[i].id, UV_SECRET_ID_LEN) == 0) { + *secret = list->secrets[i].hdr; + return 0; + } + } + return -ENOENT; +} + +/* + * Do the actual search for `uv_get_secret_metadata`. + * + * Context: might sleep. 
+ */ +static int find_secret(const u8 secret_id[UV_SECRET_ID_LEN], + struct uv_secret_list *list, + struct uv_secret_list_item_hdr *secret) +{ + u16 start_idx = 0; + u16 list_rc; + int ret; + + do { + uv_list_secrets(list, start_idx, &list_rc, NULL); + if (list_rc != UVC_RC_EXECUTED && list_rc != UVC_RC_MORE_DATA) { + if (list_rc == UVC_RC_INV_CMD) + return -ENODEV; + else + return -EIO; + } + ret = find_secret_in_page(secret_id, list, secret); + if (ret == 0) + return ret; + start_idx = list->next_secret_idx; + } while (list_rc == UVC_RC_MORE_DATA && start_idx < list->next_secret_idx); + + return -ENOENT; +} + +/** + * uv_get_secret_metadata() - get secret metadata for a given secret id. + * @secret_id: search pattern. + * @secret: output data, containing the secret's metadata. + * + * Search for a secret with the given secret_id in the Ultravisor secret store. + * + * Context: might sleep. + * + * Return: + * * %0: - Found entry; secret->idx and secret->type are valid. + * * %ENOENT - No entry found. + * * %ENODEV: - Not supported: UV not available or command not available. + * * %EIO: - Other unexpected UV error. + */ +int uv_get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN], + struct uv_secret_list_item_hdr *secret) +{ + struct uv_secret_list *buf; + int rc; + + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) + return -ENOMEM; + rc = find_secret(secret_id, buf, secret); + kfree(buf); + return rc; +} +EXPORT_SYMBOL_GPL(uv_get_secret_metadata); + +/** + * uv_retrieve_secret() - get the secret value for the secret index. + * @secret_idx: Secret index for which the secret should be retrieved. + * @buf: Buffer to store retrieved secret. + * @buf_size: Size of the buffer. The correct buffer size is reported as part of + * the result from `uv_get_secret_metadata`. + * + * Calls the Retrieve Secret UVC and translates the UV return code into an errno. + * + * Context: might sleep. + * + * Return: + * * %0 - Entry found; buffer contains a valid secret. + * * %ENOENT: - No entry found or secret at the index is non-retrievable. + * * %ENODEV: - Not supported: UV not available or command not available. + * * %EINVAL: - Buffer too small for content. + * * %EIO: - Other unexpected UV error. + */ +int uv_retrieve_secret(u16 secret_idx, u8 *buf, size_t buf_size) +{ + struct uv_cb_retr_secr uvcb = { + .header.len = sizeof(uvcb), + .header.cmd = UVC_CMD_RETR_SECRET, + .secret_idx = secret_idx, + .buf_addr = (u64)buf, + .buf_size = buf_size, + }; + + uv_call_sched(0, (u64)&uvcb); + + switch (uvcb.header.rc) { + case UVC_RC_EXECUTED: + return 0; + case UVC_RC_INV_CMD: + return -ENODEV; + case UVC_RC_RETR_SECR_STORE_EMPTY: + case UVC_RC_RETR_SECR_INV_SECRET: + case UVC_RC_RETR_SECR_INV_IDX: + return -ENOENT; + case UVC_RC_RETR_SECR_BUF_SMALL: + return -EINVAL; + default: + return -EIO; + } +} +EXPORT_SYMBOL_GPL(uv_retrieve_secret); |
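
For context, a minimal sketch of how an in-kernel user might combine the two new exports added at the end of this patch, uv_get_secret_metadata() and uv_retrieve_secret(). The helper name is hypothetical, and the idx field of struct uv_secret_list_item_hdr follows the kernel-doc wording above; the authoritative layout is in asm/uv.h.

/* Hypothetical caller: look a secret up by its id, then fetch its value. */
#include <asm/uv.h>

static int example_fetch_secret(const u8 id[UV_SECRET_ID_LEN],
				u8 *val, size_t val_size)
{
	struct uv_secret_list_item_hdr hdr;
	int rc;

	/* May fail with -ENOENT, -ENODEV, -EIO or -ENOMEM, as documented above. */
	rc = uv_get_secret_metadata(id, &hdr);
	if (rc)
		return rc;

	/*
	 * The metadata also reports the secret's length, which can be used to
	 * size @val.  -EINVAL below means @val_size is too small.
	 */
	return uv_retrieve_secret(hdr.idx, val, val_size);
}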
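
The new sysfs attributes (query/max_assoc_secrets, query/max_retr_secrets and the keys/ group) can be read from userspace. A small sketch, assuming the uv kobject is registered under /sys/firmware as in the unchanged part of this file:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/firmware/uv/keys/host_key", "r");

	if (!f) {
		/* e.g. no Ultravisor, or the query-keys UVC is not installed */
		perror("host_key");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		/* one 64-character hex hash, as emitted by emit_hash() above */
		printf("host key hash: %s", line);
	fclose(f);
	return 0;
}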