Re: [PATCH v2 04/12] mm/execmem, arch: convert remaining overrides of module_alloc to execmem

From: Song Liu
Date: Fri Jun 16 2023 - 14:54:33 EST


On Fri, Jun 16, 2023 at 1:51 AM Mike Rapoport <rppt@xxxxxxxxxx> wrote:
[...]
> diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
> index 5af4975caeb5..c3d999f3a3dd 100644
> --- a/arch/arm64/kernel/module.c
> +++ b/arch/arm64/kernel/module.c
> @@ -17,56 +17,50 @@
> #include <linux/moduleloader.h>
> #include <linux/scs.h>
> #include <linux/vmalloc.h>
> +#include <linux/execmem.h>
> #include <asm/alternative.h>
> #include <asm/insn.h>
> #include <asm/scs.h>
> #include <asm/sections.h>
>
> -void *module_alloc(unsigned long size)
> +static struct execmem_params execmem_params = {
> +        .modules = {
> +                .flags = EXECMEM_KASAN_SHADOW,
> +                .text = {
> +                        .alignment = MODULE_ALIGN,
> +                },
> +        },
> +};
> +
> +struct execmem_params __init *execmem_arch_params(void)
> {
>         u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
> -        gfp_t gfp_mask = GFP_KERNEL;
> -        void *p;
> -
> -        /* Silence the initial allocation */
> -        if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
> -                gfp_mask |= __GFP_NOWARN;
>
> -        if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
> -            IS_ENABLED(CONFIG_KASAN_SW_TAGS))
> -                /* don't exceed the static module region - see below */
> -                module_alloc_end = MODULES_END;
> +        execmem_params.modules.text.pgprot = PAGE_KERNEL;
> +        execmem_params.modules.text.start = module_alloc_base;

I think I mentioned this earlier: for arm64 with CONFIG_RANDOMIZE_BASE,
module_alloc_base is not set yet at the point where execmem_arch_params()
is called, so the text.start/text.end captured here will not be the final
values. We will need some extra logic to handle this.
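
Not sure what the cleanest fix would be, but one option might be to
refresh the range once module_alloc_base has been assigned, e.g. from a
late initcall in this file. A rough, untested sketch (the function name
and initcall level are made up, it just has to run after
module_alloc_base is set, and it assumes the execmem core keeps using
the struct returned by execmem_arch_params() rather than copying it):

static int __init arm64_execmem_update_range(void)
{
        /*
         * By now module_alloc_base holds its final (possibly
         * randomized) value, so re-populate the text range that
         * execmem_arch_params() filled in too early. The fallback_*
         * fields would need the same treatment.
         */
        execmem_params.modules.text.start = module_alloc_base;
        execmem_params.modules.text.end = module_alloc_base + MODULES_VSIZE;

        return 0;
}
subsys_initcall_sync(arm64_execmem_update_range);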

Thanks,
Song


> +        execmem_params.modules.text.end = module_alloc_end;
>
> -        p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
> -                                module_alloc_end, gfp_mask, PAGE_KERNEL, VM_DEFER_KMEMLEAK,
> -                                NUMA_NO_NODE, __builtin_return_address(0));
> -
> -        if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
> +        /*
> +         * KASAN without KASAN_VMALLOC can only deal with module
> +         * allocations being served from the reserved module region,
> +         * since the remainder of the vmalloc region is already
> +         * backed by zero shadow pages, and punching holes into it
> +         * is non-trivial. Since the module region is not randomized
> +         * when KASAN is enabled without KASAN_VMALLOC, it is even
> +         * less likely that the module region gets exhausted, so we
> +         * can simply omit this fallback in that case.
> +         */
> +        if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
>             (IS_ENABLED(CONFIG_KASAN_VMALLOC) ||
>              (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
> -              !IS_ENABLED(CONFIG_KASAN_SW_TAGS))))
> -                /*
> -                 * KASAN without KASAN_VMALLOC can only deal with module
> -                 * allocations being served from the reserved module region,
> -                 * since the remainder of the vmalloc region is already
> -                 * backed by zero shadow pages, and punching holes into it
> -                 * is non-trivial. Since the module region is not randomized
> -                 * when KASAN is enabled without KASAN_VMALLOC, it is even
> -                 * less likely that the module region gets exhausted, so we
> -                 * can simply omit this fallback in that case.
> -                 */
> -                p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
> -                                module_alloc_base + SZ_2G, GFP_KERNEL,
> -                                PAGE_KERNEL, 0, NUMA_NO_NODE,
> -                                __builtin_return_address(0));
> -
> -        if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
> -                vfree(p);
> -                return NULL;
> +              !IS_ENABLED(CONFIG_KASAN_SW_TAGS)))) {
> +                unsigned long end = module_alloc_base + SZ_2G;
> +
> +                execmem_params.modules.text.fallback_start = module_alloc_base;
> +                execmem_params.modules.text.fallback_end = end;
>         }
>
> -        /* Memory is intended to be executable, reset the pointer tag. */
> -        return kasan_reset_tag(p);
> +        return &execmem_params;
> }
>
> enum aarch64_reloc_op {