[svsm-devel] [PATCH v5 04/13] x86/sev: Perform PVALIDATE using the SVSM when not at VMPL0
Borislav Petkov
bp at alien8.de
Thu Jun 6 16:38:44 CEST 2024
On Wed, Jun 05, 2024 at 10:18:47AM -0500, Tom Lendacky wrote:
> The PVALIDATE instruction can only be performed at VMPL0. An SVSM will
> be present when running at VMPL1 or a lower privilege level.
>
> When an SVSM is present, use the SVSM_CORE_PVALIDATE call to perform
> memory validation instead of issuing the PVALIDATE instruction directly.
>
> The validation of a single 4K page is now explicitly identified as such
> in the function name, pvalidate_4k_page(). The pvalidate_pages() function
> is used to validate one or more pages of either 4K or 2M size. Each
> function, however, determines whether it can issue PVALIDATE directly
> or whether the SVSM needs to be invoked.
>
> Signed-off-by: Tom Lendacky <thomas.lendacky at amd.com>
> ---
> arch/x86/boot/compressed/sev.c | 45 +++++-
> arch/x86/include/asm/sev.h | 26 ++++
> arch/x86/kernel/sev-shared.c | 250 +++++++++++++++++++++++++++++++--
> arch/x86/kernel/sev.c | 30 ++--
> 4 files changed, 325 insertions(+), 26 deletions(-)
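
The dispatch described in the commit message boils down to the pattern
below. This is a simplified, compile-only sketch rather than the kernel
code: snp_vmpl, svsm_pval_4k_page() and pvalidate() are the identifiers
the patch really uses, while validate_4k_page(), svsm_validate_4k_page()
and direct_pvalidate() are hypothetical stand-ins for illustration.

#include <stdbool.h>

/* Hypothetical stand-ins for the kernel helpers named above. */
extern unsigned int snp_vmpl;                                   /* non-zero: an SVSM is present */
int svsm_validate_4k_page(unsigned long paddr, bool validate);  /* SVSM_CORE_PVALIDATE path */
int direct_pvalidate(unsigned long vaddr, bool validate);       /* PVALIDATE instruction path */

int validate_4k_page(unsigned long vaddr, unsigned long paddr, bool validate)
{
	/*
	 * PVALIDATE only works at VMPL0.  When an SVSM is present the
	 * guest runs at VMPL1 or lower, so the request has to be
	 * forwarded to the SVSM instead of issuing the instruction
	 * directly.
	 */
	if (snp_vmpl)
		return svsm_validate_4k_page(paddr, validate);

	return direct_pvalidate(vaddr, validate);
}

In the actual patch the failure path does not return an error but
terminates the guest via sev_es_terminate(), as the __pval_terminate()
helper below shows.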
Some touchups on top, like slimming down indentation levels, etc.
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index 226b68b4a29f..ce941a9890f8 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -284,11 +284,12 @@ void sev_es_shutdown_ghcb(void)
error("SEV-ES CPU Features missing.");
/*
- * This is used to determine whether to use the GHCB MSR protocol or
- * the GHCB shared page to perform a GHCB request. Since the GHCB page
- * is being changed to encrypted, it can't be used to perform GHCB
- * requests. Clear the boot_ghcb variable so that the GHCB MSR protocol
- * is used to change the GHCB page over to an encrypted page.
+ * This denotes whether to use the GHCB MSR protocol or the GHCB
+ * shared page to perform a GHCB request. Since the GHCB page is
+ * being changed to encrypted, it can't be used to perform GHCB
+ * requests. Clear the boot_ghcb variable so that the GHCB MSR
+ * protocol is used to change the GHCB page over to an encrypted
+ * page.
*/
boot_ghcb = NULL;
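
For context, the boot_ghcb decision the rewritten comment is talking
about follows the pattern below. This is illustrative only: struct ghcb,
ghcb_page_request() and ghcb_msr_request() are hypothetical names, not
the decompressor's actual API.

struct ghcb;

int ghcb_page_request(struct ghcb *ghcb, unsigned long exit_code);  /* shared-page protocol */
int ghcb_msr_request(unsigned long exit_code);                      /* GHCB MSR protocol    */

int ghcb_request(struct ghcb *boot_ghcb, unsigned long exit_code)
{
	if (boot_ghcb)
		/* A shared GHCB page is usable: use the full GHCB protocol. */
		return ghcb_page_request(boot_ghcb, exit_code);

	/*
	 * No usable GHCB page, e.g. while it is being converted back to
	 * encrypted: fall back to the GHCB MSR protocol.
	 */
	return ghcb_msr_request(exit_code);
}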
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index 8b191e313c0a..b889be32ef9c 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -1220,6 +1220,15 @@ static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
}
}
+static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size,
+ int ret, u64 svsm_ret)
+{
+ WARN(1, "PVALIDATE failure: pfn: 0x%llx, action: %u, size: %u, ret: %d, svsm_ret: 0x%llx\n",
+ pfn, action, page_size, ret, svsm_ret);
+
+ sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
+}
+
static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret)
{
unsigned int page_size;
@@ -1230,16 +1239,7 @@ static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svs
action = pc->entry[pc->cur_index].action;
page_size = pc->entry[pc->cur_index].page_size;
- WARN(1, "PVALIDATE failure: pfn 0x%llx, action=%u, size=%u - ret=%d, svsm_ret=0x%llx\n",
- pfn, action, page_size, ret, svsm_ret);
- sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
-}
-
-static void pval_terminate(u64 pfn, bool action, unsigned int page_size, int ret)
-{
- WARN(1, "PVALIDATE failure: pfn 0x%llx, action=%u, size=%u - ret=%d\n",
- pfn, action, page_size, ret);
- sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
+ __pval_terminate(pfn, action, page_size, ret, svsm_ret);
}
static void svsm_pval_4k_page(unsigned long paddr, bool validate)
@@ -1284,7 +1284,7 @@ static void pvalidate_4k_page(unsigned long vaddr, unsigned long paddr, bool val
int ret;
/*
- * This can be called very early in the boot, so use rip-relative
+ * This can be called very early during boot, so use rIP-relative
* references as needed.
*/
if (RIP_REL_REF(snp_vmpl)) {
@@ -1292,7 +1292,7 @@ static void pvalidate_4k_page(unsigned long vaddr, unsigned long paddr, bool val
} else {
ret = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
if (ret)
- pval_terminate(PHYS_PFN(paddr), validate, RMP_PG_SIZE_4K, ret);
+ __pval_terminate(PHYS_PFN(paddr), validate, RMP_PG_SIZE_4K, ret, 0);
}
}
@@ -1315,16 +1315,19 @@ static void pval_pages(struct snp_psc_desc *desc)
validate = e->operation == SNP_PAGE_STATE_PRIVATE;
rc = pvalidate(vaddr, size, validate);
+ if (!rc)
+ continue;
+
if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) {
unsigned long vaddr_end = vaddr + PMD_SIZE;
for (; vaddr < vaddr_end; vaddr += PAGE_SIZE, pfn++) {
rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
if (rc)
- pval_terminate(pfn, validate, RMP_PG_SIZE_4K, rc);
+ __pval_terminate(pfn, validate, RMP_PG_SIZE_4K, rc, 0);
}
- } else if (rc) {
- pval_terminate(pfn, validate, size, rc);
+ } else {
+ __pval_terminate(pfn, validate, size, rc, 0);
}
}
}
@@ -1429,24 +1432,26 @@ static void svsm_pval_pages(struct snp_psc_desc *desc)
do {
ret = svsm_perform_call_protocol(&call);
- if (ret) {
- /*
- * Check if the entry failed because of an RMP mismatch (a
- * PVALIDATE at 2M was requested, but the page is mapped in
- * the RMP as 4K).
- */
- if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH &&
- pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) {
- /* Save this entry for post-processing at 4K */
- pv_4k[pv_4k_count++] = pc->entry[pc->cur_index];
-
- /* Skip to the next one unless at the end of the list */
- pc->cur_index++;
- if (pc->cur_index < pc->num_entries)
- ret = -EAGAIN;
- else
- ret = 0;
- }
+ if (!ret)
+ continue;
+
+ /*
+ * Check if the entry failed because of an RMP mismatch (a
+ * PVALIDATE at 2M was requested, but the page is mapped in
+ * the RMP as 4K).
+ */
+
+ if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH &&
+ pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) {
+ /* Save this entry for post-processing at 4K */
+ pv_4k[pv_4k_count++] = pc->entry[pc->cur_index];
+
+ /* Skip to the next one unless at the end of the list */
+ pc->cur_index++;
+ if (pc->cur_index < pc->num_entries)
+ ret = -EAGAIN;
+ else
+ ret = 0;
}
} while (ret == -EAGAIN);
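
One subtlety in the "if (!ret) continue;" rework: in a do/while loop,
continue jumps to the controlling expression rather than to the top of
the body, so a successful call (ret == 0) drops out of the loop exactly
as the original nested version did. A minimal, self-contained
demonstration of that behaviour (plain C, nothing kernel-specific;
fake_call() merely stands in for svsm_perform_call_protocol()):

#include <stdio.h>

#define FAKE_EAGAIN 11	/* stand-in for the -EAGAIN magnitude */

/* Fails twice with -EAGAIN, then succeeds. */
static int fake_call(void)
{
	static int attempts;

	return (++attempts < 3) ? -FAKE_EAGAIN : 0;
}

int main(void)
{
	int ret;

	do {
		ret = fake_call();
		if (!ret)
			continue;	/* goes to the while() test; ret == 0 ends the loop */

		/* error handling / retry decision would go here */
		printf("call failed (ret=%d), retrying\n", ret);
	} while (ret == -FAKE_EAGAIN);

	printf("final ret=%d\n", ret);	/* prints 0 after two retries */
	return 0;
}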
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette