x86/virt/tdx: Designate reserved areas for all TDMRs
As the last step of constructing TDMRs, populate reserved areas for all TDMRs. Cover all memory holes and PAMTs with a TDMR reserved area. [ dhansen: trim down changelog ] Signed-off-by: Kai Huang <kai.huang@intel.com> Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> Reviewed-by: Isaku Yamahata <isaku.yamahata@intel.com> Reviewed-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Reviewed-by: Yuan Yao <yuan.yao@intel.com> Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com> Link: https://lore.kernel.org/all/20231208170740.53979-12-dave.hansen%40intel.com
This commit is contained in:
@ -23,6 +23,7 @@
|
||||
#include <linux/sizes.h>
|
||||
#include <linux/pfn.h>
|
||||
#include <linux/align.h>
|
||||
#include <linux/sort.h>
|
||||
#include <asm/msr-index.h>
|
||||
#include <asm/msr.h>
|
||||
#include <asm/cpufeature.h>
|
||||
@ -662,6 +663,207 @@ static unsigned long tdmrs_count_pamt_kb(struct tdmr_info_list *tdmr_list)
|
||||
return pamt_size / 1024;
|
||||
}
|
||||
|
||||
/*
 * Record one reserved area covering [@addr, @addr + @size) in @tdmr at
 * slot *@p_idx and advance the index.  Both @addr and @size must be
 * 4K-aligned.  Returns -ENOSPC once @max_reserved_per_tdmr slots have
 * been consumed, -EINVAL on misalignment, 0 on success.
 */
static int tdmr_add_rsvd_area(struct tdmr_info *tdmr, int *p_idx, u64 addr,
			      u64 size, u16 max_reserved_per_tdmr)
{
	struct tdmr_reserved_area *area;

	/* TDX requires 4K alignment for both the offset and the size */
	if (WARN_ON(addr & ~PAGE_MASK || size & ~PAGE_MASK))
		return -EINVAL;

	if (*p_idx >= max_reserved_per_tdmr) {
		pr_warn("initialization failed: TDMR [0x%llx, 0x%llx): reserved areas exhausted.\n",
			tdmr->base, tdmr_end(tdmr));
		return -ENOSPC;
	}

	/*
	 * One reserved area slot is consumed per call.  No attempt is
	 * made to merge contiguous or adjacent areas into fewer slots.
	 */
	area = &tdmr->reserved_areas[*p_idx];
	area->offset = addr - tdmr->base;
	area->size = size;

	(*p_idx)++;

	return 0;
}
|
||||
|
||||
/*
 * Go through @tmb_list to find holes between memory areas. If any of
 * those holes fall within @tdmr, set up a TDMR reserved area to cover
 * the hole.
 *
 * @tmb_list:		list of TDX memory blocks to scan for holes
 * @tdmr:		the TDMR whose reserved areas are being populated
 * @rsvd_idx:		in/out index of the next free reserved-area slot
 * @max_reserved_per_tdmr: slot limit enforced by tdmr_add_rsvd_area()
 *
 * Returns 0 on success, or the error from tdmr_add_rsvd_area()
 * (-EINVAL/-ENOSPC) on failure.
 *
 * NOTE(review): the single forward sweep with @prev_end assumes
 * @tmb_list is sorted by address ascending — confirm against the code
 * that builds the list.
 */
static int tdmr_populate_rsvd_holes(struct list_head *tmb_list,
				    struct tdmr_info *tdmr,
				    int *rsvd_idx,
				    u16 max_reserved_per_tdmr)
{
	struct tdx_memblock *tmb;
	u64 prev_end;
	int ret;

	/*
	 * Start looking for reserved blocks at the
	 * beginning of the TDMR.  @prev_end always tracks the end of
	 * the last covered range; anything between it and the next
	 * memory block is a hole that must be reserved.
	 */
	prev_end = tdmr->base;
	list_for_each_entry(tmb, tmb_list, list) {
		u64 start, end;

		start = PFN_PHYS(tmb->start_pfn);
		end = PFN_PHYS(tmb->end_pfn);

		/* Break if this region is after the TDMR */
		if (start >= tdmr_end(tdmr))
			break;

		/* Exclude regions before this TDMR */
		if (end < tdmr->base)
			continue;

		/*
		 * Skip over memory areas that
		 * have already been dealt with.  The block extends (or
		 * abuts) the covered range, so just advance @prev_end.
		 */
		if (start <= prev_end) {
			prev_end = end;
			continue;
		}

		/* Add the hole before this region */
		ret = tdmr_add_rsvd_area(tdmr, rsvd_idx, prev_end,
				start - prev_end,
				max_reserved_per_tdmr);
		if (ret)
			return ret;

		prev_end = end;
	}

	/* Add the hole after the last region if it exists. */
	if (prev_end < tdmr_end(tdmr)) {
		ret = tdmr_add_rsvd_area(tdmr, rsvd_idx, prev_end,
				tdmr_end(tdmr) - prev_end,
				max_reserved_per_tdmr);
		if (ret)
			return ret;
	}

	return 0;
}
|
||||
|
||||
/*
|
||||
* Go through @tdmr_list to find all PAMTs. If any of those PAMTs
|
||||
* overlaps with @tdmr, set up a TDMR reserved area to cover the
|
||||
* overlapping part.
|
||||
*/
|
||||
static int tdmr_populate_rsvd_pamts(struct tdmr_info_list *tdmr_list,
|
||||
struct tdmr_info *tdmr,
|
||||
int *rsvd_idx,
|
||||
u16 max_reserved_per_tdmr)
|
||||
{
|
||||
int i, ret;
|
||||
|
||||
for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) {
|
||||
struct tdmr_info *tmp = tdmr_entry(tdmr_list, i);
|
||||
unsigned long pamt_base, pamt_size, pamt_end;
|
||||
|
||||
tdmr_get_pamt(tmp, &pamt_base, &pamt_size);
|
||||
/* Each TDMR must already have PAMT allocated */
|
||||
WARN_ON_ONCE(!pamt_size || !pamt_base);
|
||||
|
||||
pamt_end = pamt_base + pamt_size;
|
||||
/* Skip PAMTs outside of the given TDMR */
|
||||
if ((pamt_end <= tdmr->base) ||
|
||||
(pamt_base >= tdmr_end(tdmr)))
|
||||
continue;
|
||||
|
||||
/* Only mark the part within the TDMR as reserved */
|
||||
if (pamt_base < tdmr->base)
|
||||
pamt_base = tdmr->base;
|
||||
if (pamt_end > tdmr_end(tdmr))
|
||||
pamt_end = tdmr_end(tdmr);
|
||||
|
||||
ret = tdmr_add_rsvd_area(tdmr, rsvd_idx, pamt_base,
|
||||
pamt_end - pamt_base,
|
||||
max_reserved_per_tdmr);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Compare function called by sort() for TDMR reserved areas */
|
||||
static int rsvd_area_cmp_func(const void *a, const void *b)
|
||||
{
|
||||
struct tdmr_reserved_area *r1 = (struct tdmr_reserved_area *)a;
|
||||
struct tdmr_reserved_area *r2 = (struct tdmr_reserved_area *)b;
|
||||
|
||||
if (r1->offset + r1->size <= r2->offset)
|
||||
return -1;
|
||||
if (r1->offset >= r2->offset + r2->size)
|
||||
return 1;
|
||||
|
||||
/* Reserved areas cannot overlap. The caller must guarantee. */
|
||||
WARN_ON_ONCE(1);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Populate reserved areas for the given @tdmr, including memory holes
|
||||
* (via @tmb_list) and PAMTs (via @tdmr_list).
|
||||
*/
|
||||
static int tdmr_populate_rsvd_areas(struct tdmr_info *tdmr,
|
||||
struct list_head *tmb_list,
|
||||
struct tdmr_info_list *tdmr_list,
|
||||
u16 max_reserved_per_tdmr)
|
||||
{
|
||||
int ret, rsvd_idx = 0;
|
||||
|
||||
ret = tdmr_populate_rsvd_holes(tmb_list, tdmr, &rsvd_idx,
|
||||
max_reserved_per_tdmr);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = tdmr_populate_rsvd_pamts(tdmr_list, tdmr, &rsvd_idx,
|
||||
max_reserved_per_tdmr);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* TDX requires reserved areas listed in address ascending order */
|
||||
sort(tdmr->reserved_areas, rsvd_idx, sizeof(struct tdmr_reserved_area),
|
||||
rsvd_area_cmp_func, NULL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Populate reserved areas for all TDMRs in @tdmr_list, including memory
|
||||
* holes (via @tmb_list) and PAMTs.
|
||||
*/
|
||||
static int tdmrs_populate_rsvd_areas_all(struct tdmr_info_list *tdmr_list,
|
||||
struct list_head *tmb_list,
|
||||
u16 max_reserved_per_tdmr)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) {
|
||||
int ret;
|
||||
|
||||
ret = tdmr_populate_rsvd_areas(tdmr_entry(tdmr_list, i),
|
||||
tmb_list, tdmr_list, max_reserved_per_tdmr);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Construct a list of TDMRs on the preallocated space in @tdmr_list
|
||||
* to cover all TDX memory regions in @tmb_list based on the TDX module
|
||||
@ -681,14 +883,13 @@ static int construct_tdmrs(struct list_head *tmb_list,
|
||||
tdmr_sysinfo->pamt_entry_size);
|
||||
if (ret)
|
||||
return ret;
|
||||
/*
|
||||
* TODO:
|
||||
*
|
||||
* - Designate reserved areas for each TDMR.
|
||||
*
|
||||
* Return -EINVAL until constructing TDMRs is done
|
||||
*/
|
||||
return -EINVAL;
|
||||
|
||||
ret = tdmrs_populate_rsvd_areas_all(tdmr_list, tmb_list,
|
||||
tdmr_sysinfo->max_reserved_per_tdmr);
|
||||
if (ret)
|
||||
tdmrs_free_pamt_all(tdmr_list);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int init_tdx_module(void)
|
||||
|
Reference in New Issue
Block a user