Merge tag 'x86_urgent_for_v5.17_rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

 - Free shmem backing storage for SGX enclave pages when those are
   swapped back into EPC memory

 - Prevent do_int3() from being kprobed, to avoid recursion

 - Remap setup_data and setup_indirect structures properly when
   accessing their members

 - Correct the alternatives patching order for modules too

* tag 'x86_urgent_for_v5.17_rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/sgx: Free backing memory after faulting the enclave page
  x86/traps: Mark do_int3() NOKPROBE_SYMBOL
  x86/boot: Add setup_indirect support in early_memremap_is_setup_data()
  x86/boot: Fix memremap of setup_indirect structures
  x86/module: Fix the paravirt vs alternative order
commit f0e18b03fc
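Editor's note: for orientation, here is a small stand-alone sketch of the backing-storage layout that the new sgx_encl_get_backing_page_pcmd_offset() helper in the first hunk encodes: enclave page data first, one extra page slot for the SECS, then densely packed PCMD entries. The struct sizes below are illustrative stand-ins rather than the kernel's SGX definitions; the 128-byte PCMD size matches the 32-per-page packing implied by the removed page_index >> 5 computation.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Illustrative stand-ins for the real SGX structures. */
struct fake_secs { unsigned char bytes[PAGE_SIZE]; }; /* the SECS occupies one backing page        */
struct fake_pcmd { unsigned char bytes[128]; };       /* a 128-byte PCMD: 32 of them fill one page */

/*
 * Byte offset of page_index's PCMD in the backing file:
 * all enclave pages, then the SECS slot, then the packed PCMD array.
 */
static size_t pcmd_offset(size_t encl_size, size_t page_index)
{
        size_t epc_end_off = encl_size + sizeof(struct fake_secs);

        return epc_end_off + page_index * sizeof(struct fake_pcmd);
}

int main(void)
{
        size_t encl_size = 16 * PAGE_SIZE;   /* a 16-page enclave, for example */

        /* Consecutive enclave pages get adjacent PCMD slots ... */
        assert(pcmd_offset(encl_size, 1) - pcmd_offset(encl_size, 0) == sizeof(struct fake_pcmd));

        /*
         * ... so many PCMDs share one backing page, which is why the patch only
         * truncates a PCMD page after memchr_inv() reports it is entirely zero.
         */
        printf("PCMD of enclave page 0 lives in backing page %zu\n",
               pcmd_offset(encl_size, 0) / PAGE_SIZE);
        return 0;
}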
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -12,6 +12,30 @@
 #include "encls.h"
 #include "sgx.h"
 
+/*
+ * Calculate byte offset of a PCMD struct associated with an enclave page. PCMD's
+ * follow right after the EPC data in the backing storage. In addition to the
+ * visible enclave pages, there's one extra page slot for SECS, before PCMD
+ * structs.
+ */
+static inline pgoff_t sgx_encl_get_backing_page_pcmd_offset(struct sgx_encl *encl,
+                                                            unsigned long page_index)
+{
+        pgoff_t epc_end_off = encl->size + sizeof(struct sgx_secs);
+
+        return epc_end_off + page_index * sizeof(struct sgx_pcmd);
+}
+
+/*
+ * Free a page from the backing storage in the given page index.
+ */
+static inline void sgx_encl_truncate_backing_page(struct sgx_encl *encl, unsigned long page_index)
+{
+        struct inode *inode = file_inode(encl->backing);
+
+        shmem_truncate_range(inode, PFN_PHYS(page_index), PFN_PHYS(page_index) + PAGE_SIZE - 1);
+}
+
 /*
  * ELDU: Load an EPC page as unblocked. For more info, see "OS Management of EPC
  * Pages" in the SDM.
@@ -22,9 +46,11 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
 {
         unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
         struct sgx_encl *encl = encl_page->encl;
+        pgoff_t page_index, page_pcmd_off;
         struct sgx_pageinfo pginfo;
         struct sgx_backing b;
-        pgoff_t page_index;
+        bool pcmd_page_empty;
+        u8 *pcmd_page;
         int ret;
 
         if (secs_page)
@@ -32,14 +58,16 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
         else
                 page_index = PFN_DOWN(encl->size);
 
+        page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
+
         ret = sgx_encl_get_backing(encl, page_index, &b);
         if (ret)
                 return ret;
 
         pginfo.addr = encl_page->desc & PAGE_MASK;
         pginfo.contents = (unsigned long)kmap_atomic(b.contents);
-        pginfo.metadata = (unsigned long)kmap_atomic(b.pcmd) +
-                          b.pcmd_offset;
+        pcmd_page = kmap_atomic(b.pcmd);
+        pginfo.metadata = (unsigned long)pcmd_page + b.pcmd_offset;
 
         if (secs_page)
                 pginfo.secs = (u64)sgx_get_epc_virt_addr(secs_page);
@@ -55,11 +83,24 @@ static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
                 ret = -EFAULT;
         }
 
-        kunmap_atomic((void *)(unsigned long)(pginfo.metadata - b.pcmd_offset));
+        memset(pcmd_page + b.pcmd_offset, 0, sizeof(struct sgx_pcmd));
+
+        /*
+         * The area for the PCMD in the page was zeroed above. Check if the
+         * whole page is now empty meaning that all PCMD's have been zeroed:
+         */
+        pcmd_page_empty = !memchr_inv(pcmd_page, 0, PAGE_SIZE);
+
+        kunmap_atomic(pcmd_page);
         kunmap_atomic((void *)(unsigned long)pginfo.contents);
 
         sgx_encl_put_backing(&b, false);
 
+        sgx_encl_truncate_backing_page(encl, page_index);
+
+        if (pcmd_page_empty)
+                sgx_encl_truncate_backing_page(encl, PFN_DOWN(page_pcmd_off));
+
         return ret;
 }
 
@@ -579,7 +620,7 @@ static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl,
 int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
                          struct sgx_backing *backing)
 {
-        pgoff_t pcmd_index = PFN_DOWN(encl->size) + 1 + (page_index >> 5);
+        pgoff_t page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
         struct page *contents;
         struct page *pcmd;
 
@@ -587,7 +628,7 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
         if (IS_ERR(contents))
                 return PTR_ERR(contents);
 
-        pcmd = sgx_encl_get_backing_page(encl, pcmd_index);
+        pcmd = sgx_encl_get_backing_page(encl, PFN_DOWN(page_pcmd_off));
         if (IS_ERR(pcmd)) {
                 put_page(contents);
                 return PTR_ERR(pcmd);
@@ -596,9 +637,7 @@ int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
         backing->page_index = page_index;
         backing->contents = contents;
         backing->pcmd = pcmd;
-        backing->pcmd_offset =
-                (page_index & (PAGE_SIZE / sizeof(struct sgx_pcmd) - 1)) *
-                sizeof(struct sgx_pcmd);
+        backing->pcmd_offset = page_pcmd_off & (PAGE_SIZE - 1);
 
         return 0;
 }
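Editor's note: the setup_data hunks that follow (e820.c, kdebugfs.c, ksysfs.c, setup.c, ioremap.c) all switch to the same two-step access pattern: map only the fixed-size setup_data header first, and only when its type is SETUP_INDIRECT remap a region large enough to also cover the payload before dereferencing the embedded setup_indirect. Below is a self-contained user-space approximation of that pattern; plain pointer casts stand in for memremap()/early_memremap(), and the struct layouts are simplified rather than the real boot-protocol definitions.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the boot protocol's SETUP_INDIRECT bit; treat the exact value as illustrative. */
#define SETUP_INDIRECT (1U << 31)

struct setup_data {          /* simplified header, not the real __packed definition */
        uint64_t next;       /* "physical address" of the next entry, 0 ends the list */
        uint32_t type;
        uint32_t len;        /* number of payload bytes that follow this header */
};

struct setup_indirect_payload {  /* simplified stand-in for struct setup_indirect */
        uint32_t type;
        uint64_t addr;
        uint64_t len;
};

/* Stand-in for memremap(): in this sketch a "physical address" is just a pointer value. */
static void *map(uint64_t paddr, uint64_t size)
{
        (void)size;          /* a real implementation would map exactly [paddr, paddr + size) */
        return (void *)(uintptr_t)paddr;
}

static void walk(uint64_t pa_data)
{
        while (pa_data) {
                /* Step 1: map only the header; the payload is not accessible yet. */
                struct setup_data *data = map(pa_data, sizeof(*data));
                uint64_t pa_next = data->next;

                if (data->type == SETUP_INDIRECT) {
                        /* Step 2: remap header plus payload before touching it. */
                        data = map(pa_data, sizeof(*data) + data->len);

                        struct setup_indirect_payload *ind =
                                (struct setup_indirect_payload *)(data + 1);

                        if (ind->type != SETUP_INDIRECT)   /* guard against nesting, as the kernel does */
                                printf("indirect entry -> addr %#llx, len %llu\n",
                                       (unsigned long long)ind->addr,
                                       (unsigned long long)ind->len);
                } else {
                        printf("regular entry, type %#x, len %u\n",
                               (unsigned)data->type, (unsigned)data->len);
                }

                pa_data = pa_next;
        }
}

int main(void)
{
        static uint8_t blob[64];                     /* pretend firmware-provided payload */
        static struct {
                struct setup_data hdr;
                struct setup_indirect_payload ind;   /* payload immediately follows the header */
        } entry;

        entry.hdr.next = 0;
        entry.hdr.type = SETUP_INDIRECT;
        entry.hdr.len  = sizeof(entry.ind);
        entry.ind.type = 0;
        entry.ind.addr = (uintptr_t)blob;
        entry.ind.len  = sizeof(blob);

        walk((uintptr_t)&entry);
        return 0;
}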
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -995,8 +995,10 @@ early_param("memmap", parse_memmap_opt);
  */
 void __init e820__reserve_setup_data(void)
 {
+        struct setup_indirect *indirect;
         struct setup_data *data;
-        u64 pa_data;
+        u64 pa_data, pa_next;
+        u32 len;
 
         pa_data = boot_params.hdr.setup_data;
         if (!pa_data)
@@ -1004,6 +1006,14 @@ void __init e820__reserve_setup_data(void)
 
         while (pa_data) {
                 data = early_memremap(pa_data, sizeof(*data));
+                if (!data) {
+                        pr_warn("e820: failed to memremap setup_data entry\n");
+                        return;
+                }
+
+                len = sizeof(*data);
+                pa_next = data->next;
+
                 e820__range_update(pa_data, sizeof(*data)+data->len, E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
 
                 /*
@@ -1015,18 +1025,27 @@ void __init e820__reserve_setup_data(void)
                                                  sizeof(*data) + data->len,
                                                  E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
 
-                if (data->type == SETUP_INDIRECT &&
-                    ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
-                        e820__range_update(((struct setup_indirect *)data->data)->addr,
-                                           ((struct setup_indirect *)data->data)->len,
-                                           E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
-                        e820__range_update_kexec(((struct setup_indirect *)data->data)->addr,
-                                                 ((struct setup_indirect *)data->data)->len,
-                                                 E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+                if (data->type == SETUP_INDIRECT) {
+                        len += data->len;
+                        early_memunmap(data, sizeof(*data));
+                        data = early_memremap(pa_data, len);
+                        if (!data) {
+                                pr_warn("e820: failed to memremap indirect setup_data\n");
+                                return;
+                        }
+
+                        indirect = (struct setup_indirect *)data->data;
+
+                        if (indirect->type != SETUP_INDIRECT) {
+                                e820__range_update(indirect->addr, indirect->len,
+                                                   E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+                                e820__range_update_kexec(indirect->addr, indirect->len,
+                                                         E820_TYPE_RAM, E820_TYPE_RESERVED_KERN);
+                        }
                 }
 
-                pa_data = data->next;
-                early_memunmap(data, sizeof(*data));
+                pa_data = pa_next;
+                early_memunmap(data, len);
         }
 
         e820__update_table(e820_table);
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -88,11 +88,13 @@ create_setup_data_node(struct dentry *parent, int no,
 
 static int __init create_setup_data_nodes(struct dentry *parent)
 {
+        struct setup_indirect *indirect;
         struct setup_data_node *node;
         struct setup_data *data;
-        int error;
+        u64 pa_data, pa_next;
         struct dentry *d;
-        u64 pa_data;
+        int error;
+        u32 len;
         int no = 0;
 
         d = debugfs_create_dir("setup_data", parent);
@@ -112,12 +114,29 @@ static int __init create_setup_data_nodes(struct dentry *parent)
                         error = -ENOMEM;
                         goto err_dir;
                 }
+                pa_next = data->next;
 
-                if (data->type == SETUP_INDIRECT &&
-                    ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
-                        node->paddr = ((struct setup_indirect *)data->data)->addr;
-                        node->type = ((struct setup_indirect *)data->data)->type;
-                        node->len = ((struct setup_indirect *)data->data)->len;
+                if (data->type == SETUP_INDIRECT) {
+                        len = sizeof(*data) + data->len;
+                        memunmap(data);
+                        data = memremap(pa_data, len, MEMREMAP_WB);
+                        if (!data) {
+                                kfree(node);
+                                error = -ENOMEM;
+                                goto err_dir;
+                        }
+
+                        indirect = (struct setup_indirect *)data->data;
+
+                        if (indirect->type != SETUP_INDIRECT) {
+                                node->paddr = indirect->addr;
+                                node->type = indirect->type;
+                                node->len = indirect->len;
+                        } else {
+                                node->paddr = pa_data;
+                                node->type = data->type;
+                                node->len = data->len;
+                        }
                 } else {
                         node->paddr = pa_data;
                         node->type = data->type;
@@ -125,7 +144,7 @@ static int __init create_setup_data_nodes(struct dentry *parent)
                 }
 
                 create_setup_data_node(d, no, node);
-                pa_data = data->next;
+                pa_data = pa_next;
 
                 memunmap(data);
                 no++;
--- a/arch/x86/kernel/ksysfs.c
+++ b/arch/x86/kernel/ksysfs.c
@@ -91,26 +91,41 @@ static int get_setup_data_paddr(int nr, u64 *paddr)
 
 static int __init get_setup_data_size(int nr, size_t *size)
 {
-        int i = 0;
+        u64 pa_data = boot_params.hdr.setup_data, pa_next;
+        struct setup_indirect *indirect;
         struct setup_data *data;
-        u64 pa_data = boot_params.hdr.setup_data;
+        int i = 0;
+        u32 len;
 
         while (pa_data) {
                 data = memremap(pa_data, sizeof(*data), MEMREMAP_WB);
                 if (!data)
                         return -ENOMEM;
+                pa_next = data->next;
 
                 if (nr == i) {
-                        if (data->type == SETUP_INDIRECT &&
-                            ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT)
-                                *size = ((struct setup_indirect *)data->data)->len;
-                        else
+                        if (data->type == SETUP_INDIRECT) {
+                                len = sizeof(*data) + data->len;
+                                memunmap(data);
+                                data = memremap(pa_data, len, MEMREMAP_WB);
+                                if (!data)
+                                        return -ENOMEM;
+
+                                indirect = (struct setup_indirect *)data->data;
+
+                                if (indirect->type != SETUP_INDIRECT)
+                                        *size = indirect->len;
+                                else
+                                        *size = data->len;
+                        } else {
                                 *size = data->len;
+                        }
 
                         memunmap(data);
                         return 0;
                 }
 
-                pa_data = data->next;
+                pa_data = pa_next;
                 memunmap(data);
                 i++;
         }
@@ -120,9 +135,11 @@ static int __init get_setup_data_size(int nr, size_t *size)
 static ssize_t type_show(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf)
 {
+        struct setup_indirect *indirect;
+        struct setup_data *data;
         int nr, ret;
         u64 paddr;
-        struct setup_data *data;
+        u32 len;
 
         ret = kobj_to_setup_data_nr(kobj, &nr);
         if (ret)
@@ -135,10 +152,20 @@ static ssize_t type_show(struct kobject *kobj,
         if (!data)
                 return -ENOMEM;
 
-        if (data->type == SETUP_INDIRECT)
-                ret = sprintf(buf, "0x%x\n", ((struct setup_indirect *)data->data)->type);
-        else
+        if (data->type == SETUP_INDIRECT) {
+                len = sizeof(*data) + data->len;
+                memunmap(data);
+                data = memremap(paddr, len, MEMREMAP_WB);
+                if (!data)
+                        return -ENOMEM;
+
+                indirect = (struct setup_indirect *)data->data;
+
+                ret = sprintf(buf, "0x%x\n", indirect->type);
+        } else {
                 ret = sprintf(buf, "0x%x\n", data->type);
+        }
 
         memunmap(data);
         return ret;
 }
@@ -149,9 +176,10 @@ static ssize_t setup_data_data_read(struct file *fp,
                               char *buf,
                               loff_t off, size_t count)
 {
+        struct setup_indirect *indirect;
+        struct setup_data *data;
         int nr, ret = 0;
         u64 paddr, len;
-        struct setup_data *data;
         void *p;
 
         ret = kobj_to_setup_data_nr(kobj, &nr);
@@ -165,10 +193,27 @@ static ssize_t setup_data_data_read(struct file *fp,
         if (!data)
                 return -ENOMEM;
 
-        if (data->type == SETUP_INDIRECT &&
-            ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
-                paddr = ((struct setup_indirect *)data->data)->addr;
-                len = ((struct setup_indirect *)data->data)->len;
+        if (data->type == SETUP_INDIRECT) {
+                len = sizeof(*data) + data->len;
+                memunmap(data);
+                data = memremap(paddr, len, MEMREMAP_WB);
+                if (!data)
+                        return -ENOMEM;
+
+                indirect = (struct setup_indirect *)data->data;
+
+                if (indirect->type != SETUP_INDIRECT) {
+                        paddr = indirect->addr;
+                        len = indirect->len;
+                } else {
+                        /*
+                         * Even though this is technically undefined, return
+                         * the data as though it is a normal setup_data struct.
+                         * This will at least allow it to be inspected.
+                         */
+                        paddr += sizeof(*data);
+                        len = data->len;
+                }
         } else {
                 paddr += sizeof(*data);
                 len = data->len;
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -273,6 +273,14 @@ int module_finalize(const Elf_Ehdr *hdr,
                         retpolines = s;
         }
 
+        /*
+         * See alternative_instructions() for the ordering rules between the
+         * various patching types.
+         */
+        if (para) {
+                void *pseg = (void *)para->sh_addr;
+                apply_paravirt(pseg, pseg + para->sh_size);
+        }
         if (retpolines) {
                 void *rseg = (void *)retpolines->sh_addr;
                 apply_retpolines(rseg, rseg + retpolines->sh_size);
@@ -290,11 +298,6 @@ int module_finalize(const Elf_Ehdr *hdr,
                                             tseg, tseg + text->sh_size);
         }
 
-        if (para) {
-                void *pseg = (void *)para->sh_addr;
-                apply_paravirt(pseg, pseg + para->sh_size);
-        }
-
         /* make jump label nops */
         jump_label_apply_nops(me);
 
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -369,21 +369,41 @@ static void __init parse_setup_data(void)
 
 static void __init memblock_x86_reserve_range_setup_data(void)
 {
+        struct setup_indirect *indirect;
         struct setup_data *data;
-        u64 pa_data;
+        u64 pa_data, pa_next;
+        u32 len;
 
         pa_data = boot_params.hdr.setup_data;
         while (pa_data) {
                 data = early_memremap(pa_data, sizeof(*data));
+                if (!data) {
+                        pr_warn("setup: failed to memremap setup_data entry\n");
+                        return;
+                }
+
+                len = sizeof(*data);
+                pa_next = data->next;
+
                 memblock_reserve(pa_data, sizeof(*data) + data->len);
 
-                if (data->type == SETUP_INDIRECT &&
-                    ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT)
-                        memblock_reserve(((struct setup_indirect *)data->data)->addr,
-                                         ((struct setup_indirect *)data->data)->len);
+                if (data->type == SETUP_INDIRECT) {
+                        len += data->len;
+                        early_memunmap(data, sizeof(*data));
+                        data = early_memremap(pa_data, len);
+                        if (!data) {
+                                pr_warn("setup: failed to memremap indirect setup_data\n");
+                                return;
+                        }
+
+                        indirect = (struct setup_indirect *)data->data;
+
+                        if (indirect->type != SETUP_INDIRECT)
+                                memblock_reserve(indirect->addr, indirect->len);
+                }
 
-                pa_data = data->next;
-                early_memunmap(data, sizeof(*data));
+                pa_data = pa_next;
+                early_memunmap(data, len);
         }
 }
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -659,6 +659,7 @@ static bool do_int3(struct pt_regs *regs)
 
         return res == NOTIFY_STOP;
 }
+NOKPROBE_SYMBOL(do_int3);
 
 static void do_int3_user(struct pt_regs *regs)
 {
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -615,6 +615,7 @@ static bool memremap_is_efi_data(resource_size_t phys_addr,
 static bool memremap_is_setup_data(resource_size_t phys_addr,
                                    unsigned long size)
 {
+        struct setup_indirect *indirect;
         struct setup_data *data;
         u64 paddr, paddr_next;
 
@@ -627,6 +628,10 @@ static bool memremap_is_setup_data(resource_size_t phys_addr,
 
                 data = memremap(paddr, sizeof(*data),
                                 MEMREMAP_WB | MEMREMAP_DEC);
+                if (!data) {
+                        pr_warn("failed to memremap setup_data entry\n");
+                        return false;
+                }
 
                 paddr_next = data->next;
                 len = data->len;
@@ -636,10 +641,21 @@ static bool memremap_is_setup_data(resource_size_t phys_addr,
                         return true;
                 }
 
-                if (data->type == SETUP_INDIRECT &&
-                    ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT) {
-                        paddr = ((struct setup_indirect *)data->data)->addr;
-                        len = ((struct setup_indirect *)data->data)->len;
+                if (data->type == SETUP_INDIRECT) {
+                        memunmap(data);
+                        data = memremap(paddr, sizeof(*data) + len,
+                                        MEMREMAP_WB | MEMREMAP_DEC);
+                        if (!data) {
+                                pr_warn("failed to memremap indirect setup_data\n");
+                                return false;
+                        }
+
+                        indirect = (struct setup_indirect *)data->data;
+
+                        if (indirect->type != SETUP_INDIRECT) {
+                                paddr = indirect->addr;
+                                len = indirect->len;
+                        }
                 }
 
                 memunmap(data);
@@ -660,22 +676,51 @@ static bool memremap_is_setup_data(resource_size_t phys_addr,
 static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
                                                 unsigned long size)
 {
+        struct setup_indirect *indirect;
         struct setup_data *data;
         u64 paddr, paddr_next;
 
         paddr = boot_params.hdr.setup_data;
         while (paddr) {
-                unsigned int len;
+                unsigned int len, size;
 
                 if (phys_addr == paddr)
                         return true;
 
                 data = early_memremap_decrypted(paddr, sizeof(*data));
+                if (!data) {
+                        pr_warn("failed to early memremap setup_data entry\n");
+                        return false;
+                }
+
+                size = sizeof(*data);
 
                 paddr_next = data->next;
                 len = data->len;
 
-                early_memunmap(data, sizeof(*data));
+                if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
+                        early_memunmap(data, sizeof(*data));
+                        return true;
+                }
+
+                if (data->type == SETUP_INDIRECT) {
+                        size += len;
+                        early_memunmap(data, sizeof(*data));
+                        data = early_memremap_decrypted(paddr, size);
+                        if (!data) {
+                                pr_warn("failed to early memremap indirect setup_data\n");
+                                return false;
+                        }
+
+                        indirect = (struct setup_indirect *)data->data;
+
+                        if (indirect->type != SETUP_INDIRECT) {
+                                paddr = indirect->addr;
+                                len = indirect->len;
+                        }
+                }
+
+                early_memunmap(data, size);
 
                 if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
                         return true;