x86/sev: Add RMP entry lookup helpers
Add a helper that can be used to access information contained in the RMP
entry corresponding to a particular PFN. This will be needed to make
decisions on how to handle setting up mappings in the NPT in response to
guest page-faults and handling things like cleaning up pages and setting
them back to the default hypervisor-owned state when they are no longer
being used for private data.

  [ mdr: separate 'assigned' indicator from return code, and simplify
    function signatures for various helpers. ]

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Co-developed-by: Ashish Kalra <ashish.kalra@amd.com>
Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20240126041126.1927228-7-michael.roth@amd.com
parent e3fd08afb7
commit 94b36bc244
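To illustrate what the new interface is meant to support, here is a minimal caller-side sketch (not part of this patch; the function name and the 2M check are illustrative assumptions — real users such as the NPT fault-handling path are wired up separately):

/*
 * Hypothetical example: report whether a PFN is guest-owned (assigned) and
 * covered by a 2MB RMP entry. Assumes the declaration added by this patch
 * (snp_lookup_rmpentry()) and the PG_LEVEL_* constants are in scope.
 */
static bool example_pfn_is_assigned_2m(u64 pfn)
{
	bool assigned;
	int level, ret;

	ret = snp_lookup_rmpentry(pfn, &assigned, &level);
	if (ret)
		return false;	/* SNP disabled or PFN outside the RMP table */

	return assigned && level == PG_LEVEL_2M;
}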
@@ -90,6 +90,7 @@ extern bool handle_vc_boot_ghcb(struct pt_regs *regs);
/* RMP page size */
#define RMP_PG_SIZE_4K 0
#define RMP_PG_SIZE_2M 1
#define RMP_TO_PG_LEVEL(level) (((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)

#define RMPADJUST_VMSA_PAGE_BIT BIT(16)
@@ -245,8 +246,10 @@ static inline u64 sev_get_status(void) { return 0; }

#ifdef CONFIG_KVM_AMD_SEV
bool snp_probe_rmptable_info(void);
int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level);
#else
static inline bool snp_probe_rmptable_info(void) { return false; }
static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENODEV; }
#endif

#endif
@@ -53,6 +53,9 @@ struct rmpentry {
 */
#define RMPTABLE_CPU_BOOKKEEPING_SZ 0x4000

/* Mask to apply to a PFN to get the first PFN of a 2MB page */
#define PFN_PMD_MASK GENMASK_ULL(63, PMD_SHIFT - PAGE_SHIFT)

static u64 probed_rmp_base, probed_rmp_size;
static struct rmpentry *rmptable __ro_after_init;
static u64 rmptable_max_pfn __ro_after_init;
@@ -214,3 +217,49 @@ nosnp:
 * This must be called after the IOMMU has been initialized.
 */
device_initcall(snp_rmptable_init);

static struct rmpentry *get_rmpentry(u64 pfn)
{
	if (WARN_ON_ONCE(pfn > rmptable_max_pfn))
		return ERR_PTR(-EFAULT);

	return &rmptable[pfn];
}

static struct rmpentry *__snp_lookup_rmpentry(u64 pfn, int *level)
{
	struct rmpentry *large_entry, *entry;

	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
		return ERR_PTR(-ENODEV);

	entry = get_rmpentry(pfn);
	if (IS_ERR(entry))
		return entry;

	/*
	 * Find the authoritative RMP entry for a PFN. This can be either a 4K
	 * RMP entry or a special large RMP entry that is authoritative for a
	 * whole 2M area.
	 */
	large_entry = get_rmpentry(pfn & PFN_PMD_MASK);
	if (IS_ERR(large_entry))
		return large_entry;

	*level = RMP_TO_PG_LEVEL(large_entry->pagesize);

	return entry;
}

int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level)
{
	struct rmpentry *e;

	e = __snp_lookup_rmpentry(pfn, level);
	if (IS_ERR(e))
		return PTR_ERR(e);

	*assigned = !!e->assigned;
	return 0;
}
EXPORT_SYMBOL_GPL(snp_lookup_rmpentry);
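A note on the lookup logic above: PFN_PMD_MASK clears the low PMD_SHIFT - PAGE_SHIFT (= 9 with 4K pages) bits of a PFN, so __snp_lookup_rmpentry() reads both the 4K entry for the PFN itself and the head entry of its 2MB region; the head entry's pagesize decides the level reported back to the caller. A minimal sketch of just that masking step, with an arbitrary example value:

/* PFN_PMD_MASK == GENMASK_ULL(63, 9), i.e. it clears PFN bits 8:0 */
u64 pfn      = 0x12345;            /* arbitrary 4K PFN                     */
u64 head_pfn = pfn & PFN_PMD_MASK; /* 0x12200: first PFN of its 2MB region */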