[patch 16/30] x86/microcode/intel: Switch to kvmalloc()

From: Thomas Gleixner
Date: Thu Aug 10 2023 - 14:38:46 EST


From: Thomas Gleixner <tglx@xxxxxxxxxxxxx>

Microcode blobs are getting larger and might soon reach the kmalloc()
limit. Switch over to kvmalloc().

32-bit has to stay with kmalloc() because the early loading runs before
paging is enabled and therefore needs physically contiguous memory. Add
a sanity check to ensure that.
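
For illustration, the behaviour the sanity check relies on: kvmalloc()
tries kmalloc() first and falls back to vmalloc() for sizes the slab
allocator cannot satisfy, and a vmalloc()-backed buffer is only
virtually contiguous. A minimal sketch of the check (made-up size and
error values, not the patch itself):

	/*
	 * is_vmalloc_addr() detects a vmalloc() fallback, which the
	 * 32-bit early loader cannot use as it runs with paging
	 * disabled.
	 */
	void *buf = kvmalloc(size, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	if (IS_ENABLED(CONFIG_X86_32) && is_vmalloc_addr(buf)) {
		kvfree(buf);
		return -E2BIG;
	}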

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>

---
arch/x86/kernel/cpu/microcode/intel.c | 55 +++++++++++++++++++---------------
1 file changed, 32 insertions(+), 23 deletions(-)
---
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -14,7 +14,6 @@
#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -215,7 +214,7 @@ EXPORT_SYMBOL_GPL(intel_microcode_sanity

static void update_ucode_pointer(struct microcode_intel *mc)
{
- kfree(ucode_patch_va);
+ kvfree(ucode_patch_va);

/*
* Save the virtual address for early loading on 64bit
@@ -229,13 +228,18 @@ static void update_ucode_pointer(struct

static void save_microcode_patch(struct microcode_intel *patch)
{
- struct microcode_intel *mc;
+ unsigned int size = get_totalsize(&patch->hdr);
+ struct microcode_intel *mc = NULL;
+
+ if (IS_ENABLED(CONFIG_X86_64))
+ mc = kvmemdup(patch, size, GFP_KERNEL);
+ else
+ mc = kmemdup(patch, size, GFP_KERNEL);

- mc = kmemdup(patch, get_totalsize(&patch->hdr), GFP_KERNEL);
if (mc)
update_ucode_pointer(mc);
else
- pr_err("Unable to allocate microcode memory\n");
+ pr_err("Unable to allocate microcode memory size: %u\n", size);
}

/* Scan CPIO for microcode matching the boot CPUs family, model, stepping */
@@ -586,36 +590,34 @@ static enum ucode_state read_ucode_intel

if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
pr_err("error! Truncated or inaccessible header in microcode data file\n");
- break;
+ goto fail;
}

mc_size = get_totalsize(&mc_header);
if (mc_size < sizeof(mc_header)) {
pr_err("error! Bad data in microcode data file (totalsize too small)\n");
- break;
+ goto fail;
}
-
data_size = mc_size - sizeof(mc_header);
if (data_size > iov_iter_count(iter)) {
pr_err("error! Bad data in microcode data file (truncated file?)\n");
- break;
+ goto fail;
}

/* For performance reasons, reuse mc area when possible */
if (!mc || mc_size > curr_mc_size) {
- vfree(mc);
- mc = vmalloc(mc_size);
+ kvfree(mc);
+ mc = kvmalloc(mc_size, GFP_KERNEL);
if (!mc)
- break;
+ goto fail;
curr_mc_size = mc_size;
}

memcpy(mc, &mc_header, sizeof(mc_header));
data = mc + sizeof(mc_header);
if (!copy_from_iter_full(data, data_size, iter) ||
- intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0) {
- break;
- }
+ intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0)
+ goto fail;

if (cur_rev >= mc_header.rev)
continue;
@@ -623,25 +625,32 @@ static enum ucode_state read_ucode_intel
if (!intel_find_matching_signature(mc, uci->cpu_sig.sig, uci->cpu_sig.pf))
continue;

- vfree(new_mc);
+ kvfree(new_mc);
cur_rev = mc_header.rev;
new_mc = mc;
new_mc_size = mc_size;
mc = NULL;
}

- vfree(mc);
+ if (iov_iter_count(iter))
+ goto fail;

- if (iov_iter_count(iter)) {
- vfree(new_mc);
- return UCODE_ERROR;
+ if (IS_ENABLED(CONFIG_X86_32) && new_mc && is_vmalloc_addr(new_mc)) {
+ pr_err("Microcode too large for 32-bit mode\n");
+ goto fail;
}

+ kvfree(mc);
if (!new_mc)
return UCODE_NFOUND;

ucode_patch_late = (struct microcode_intel *)new_mc;
return UCODE_NEW;
+
+fail:
+ kvfree(mc);
+ kvfree(new_mc);
+ return UCODE_ERROR;
}

static bool is_blacklisted(unsigned int cpu)
@@ -700,9 +709,9 @@ static enum ucode_state request_microcod
static void finalize_late_load(int result)
{
if (!result)
- save_microcode_patch(ucode_patch_late);
-
- vfree(ucode_patch_late);
+ update_ucode_pointer(ucode_patch_late);
+ else
+ kvfree(ucode_patch_late);
ucode_patch_late = NULL;
}
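
A note on the free side of the conversion: buffers which may come from
kvmalloc()/kvmemdup() have to be released with kvfree(), which picks
vfree() or kfree() based on the address and is safe on NULL. That is
why the consolidated fail path above can free both mc and new_mc
unconditionally. A minimal sketch of the pairing, with dup_blob() and
drop_blob() as made-up helpers:

	/* Made-up helper illustrating the kvmemdup()/kvfree() pairing */
	static void *dup_blob(const void *src, size_t size)
	{
		/* backing is kmalloc() or vmalloc(), depending on size */
		return kvmemdup(src, size, GFP_KERNEL);
	}

	static void drop_blob(void *blob)
	{
		/* correct for either backing; safe when blob is NULL */
		kvfree(blob);
	}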