x86/microcode/intel: Switch to kvmalloc()

Microcode blobs are getting larger and might soon reach the kmalloc()
limit. Switch over to kvmalloc().

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20231002115902.564323243@linutronix.de
This commit is contained in:
Thomas Gleixner 2023-10-02 13:59:45 +02:00 committed by Borislav Petkov (AMD)
parent 2a1dada3d1
commit f24f204405

View File

@ -14,7 +14,6 @@
#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@ -245,7 +244,7 @@ EXPORT_SYMBOL_GPL(intel_microcode_sanity_check);
static void update_ucode_pointer(struct microcode_intel *mc)
{
kfree(ucode_patch_va);
kvfree(ucode_patch_va);
/*
* Save the virtual address for early loading and for eventual free
@ -256,11 +255,14 @@ static void update_ucode_pointer(struct microcode_intel *mc)
static void save_microcode_patch(struct microcode_intel *patch)
{
unsigned int size = get_totalsize(&patch->hdr);
struct microcode_intel *mc;
mc = kmemdup(patch, get_totalsize(&patch->hdr), GFP_KERNEL);
mc = kvmemdup(patch, size, GFP_KERNEL);
if (mc)
update_ucode_pointer(mc);
else
pr_err("Unable to allocate microcode memory size: %u\n", size);
}
/* Scan blob for microcode matching the boot CPUs family, model, stepping */
@ -539,36 +541,34 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter)
if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
pr_err("error! Truncated or inaccessible header in microcode data file\n");
break;
goto fail;
}
mc_size = get_totalsize(&mc_header);
if (mc_size < sizeof(mc_header)) {
pr_err("error! Bad data in microcode data file (totalsize too small)\n");
break;
goto fail;
}
data_size = mc_size - sizeof(mc_header);
if (data_size > iov_iter_count(iter)) {
pr_err("error! Bad data in microcode data file (truncated file?)\n");
break;
goto fail;
}
/* For performance reasons, reuse mc area when possible */
if (!mc || mc_size > curr_mc_size) {
vfree(mc);
mc = vmalloc(mc_size);
kvfree(mc);
mc = kvmalloc(mc_size, GFP_KERNEL);
if (!mc)
break;
goto fail;
curr_mc_size = mc_size;
}
memcpy(mc, &mc_header, sizeof(mc_header));
data = mc + sizeof(mc_header);
if (!copy_from_iter_full(data, data_size, iter) ||
intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0) {
break;
}
intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0)
goto fail;
if (cur_rev >= mc_header.rev)
continue;
@ -576,24 +576,26 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter)
if (!intel_find_matching_signature(mc, uci->cpu_sig.sig, uci->cpu_sig.pf))
continue;
vfree(new_mc);
kvfree(new_mc);
cur_rev = mc_header.rev;
new_mc = mc;
mc = NULL;
}
vfree(mc);
if (iov_iter_count(iter)) {
vfree(new_mc);
return UCODE_ERROR;
}
if (iov_iter_count(iter))
goto fail;
kvfree(mc);
if (!new_mc)
return UCODE_NFOUND;
ucode_patch_late = (struct microcode_intel *)new_mc;
return UCODE_NEW;
fail:
kvfree(mc);
kvfree(new_mc);
return UCODE_ERROR;
}
static bool is_blacklisted(unsigned int cpu)
@ -652,9 +654,9 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
static void finalize_late_load(int result)
{
if (!result)
save_microcode_patch(ucode_patch_late);
vfree(ucode_patch_late);
update_ucode_pointer(ucode_patch_late);
else
kvfree(ucode_patch_late);
ucode_patch_late = NULL;
}