Re: [PATCH v5] mm/vmalloc: lock contention optimization under multi-threading

From: Baoquan He
Date: Fri Feb 23 2024 - 09:03:59 EST


Hi Rulin,

On 02/23/24 at 08:03am, rulinhuang wrote:
> When allocating a new memory area where the mapping address range is
> known, it is observed that the vmap_area lock is acquired twice.
> The first acquisition occurs in the alloc_vmap_area() function when
> inserting the vm area into the vm mapping red-black tree. The second
> acquisition occurs in the setup_vmalloc_vm() function when updating
> properties of the vm, such as flags and address.
> Combine these two operations in alloc_vmap_area(), which
> improves scalability when the vmap_area lock is contended. By doing so,
> the need to acquire the lock twice is also eliminated.
> With the above change, tested on an Intel Icelake platform (160 vcpus,
> kernel v6.7), a 6% performance improvement and a 7% reduction in the
> overall spinlock hotspot are gained on
> stress-ng/pthread (https://github.com/ColinIanKing/stress-ng), which is
> a stress test of thread creation.

This log can be rearranged into several paragraphs for easier reading.

>
> Reviewed-by: Chen Tim C <tim.c.chen@xxxxxxxxx>
> Reviewed-by: King Colin <colin.king@xxxxxxxxx>

Since you have taken a different way to fix this, these Reviewed-by tags
from the old solution and patch should be removed. Carrying them over
could be misleading.

Yeah, this v5 is what I suggested in the draft. It looks good to me.
There's one concern in the code, please see the inline comment.

> Signed-off-by: rulinhuang <rulin.huang@xxxxxxxxx>
> ---
> V1 -> V2: Avoided the partial initialization issue of vm and
> separated insert_vmap_area() from alloc_vmap_area()
> V2 -> V3: Rebased on 6.8-rc5
> V3 -> V4: Rebased on mm-unstable branch
> V4 -> V5: Canceled the split of alloc_vmap_area()
> and kept insert_vmap_area()
> ---
> mm/vmalloc.c | 48 ++++++++++++++++++++++--------------------------
> 1 file changed, 22 insertions(+), 26 deletions(-)
>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 25a8df497255..6baaf08737f8 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -1841,15 +1841,26 @@ node_alloc(unsigned long size, unsigned long align,
> return va;
> }
>
> +static inline void setup_vmalloc_vm(struct vm_struct *vm,
> + struct vmap_area *va, unsigned long flags, const void *caller)
> +{
> + vm->flags = flags;
> + vm->addr = (void *)va->va_start;
> + vm->size = va->va_end - va->va_start;
> + vm->caller = caller;
> + va->vm = vm;
> +}
> +
> /*
> * Allocate a region of KVA of the specified size and alignment, within the
> - * vstart and vend.
> + * vstart and vend. If vm is passed in, the two will also be bound.
> */
> static struct vmap_area *alloc_vmap_area(unsigned long size,
> unsigned long align,
> unsigned long vstart, unsigned long vend,
> int node, gfp_t gfp_mask,
> - unsigned long va_flags)
> + unsigned long va_flags, struct vm_struct *vm,
> + unsigned long flags, const void *caller)
> {
> struct vmap_node *vn;
> struct vmap_area *va;
> @@ -1912,6 +1923,9 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
> va->vm = NULL;
> va->flags = (va_flags | vn_id);
>

We may need to add a BUG_ON() or WARN_ON() to check whether (va_flags &
VMAP_RAM) is true while vm is not NULL. Not sure if this is overthinking
it.
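
Something like the below is what I have in mind (just a sketch, the
exact placement within alloc_vmap_area() is up to you):

	/*
	 * VMAP_RAM areas never carry a vm_struct, so passing a
	 * non-NULL vm together with VMAP_RAM is a caller bug.
	 */
	WARN_ON_ONCE((va_flags & VMAP_RAM) && vm);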

By the way, if you decide to post a v6 to polish the log and remove the
ack tags, can you post it separately? Sometimes adding all posts in one
thread gets confusing.

Thanks
Baoquan

> + if (vm)
> + setup_vmalloc_vm(vm, va, flags, caller);
> +
> vn = addr_to_node(va->va_start);
>
> spin_lock(&vn->busy.lock);
> @@ -2486,7 +2500,8 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
> va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
> VMALLOC_START, VMALLOC_END,
> node, gfp_mask,
> - VMAP_RAM|VMAP_BLOCK);
> + VMAP_RAM|VMAP_BLOCK, NULL,
> + 0, NULL);
> if (IS_ERR(va)) {
> kfree(vb);
> return ERR_CAST(va);
> @@ -2843,7 +2858,8 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node)
> struct vmap_area *va;
> va = alloc_vmap_area(size, PAGE_SIZE,
> VMALLOC_START, VMALLOC_END,
> - node, GFP_KERNEL, VMAP_RAM);
> + node, GFP_KERNEL, VMAP_RAM,
> + NULL, 0, NULL);
> if (IS_ERR(va))
> return NULL;
>
> @@ -2946,26 +2962,6 @@ void __init vm_area_register_early(struct vm_struct *vm, size_t align)
> kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
> }
>
> -static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
> - struct vmap_area *va, unsigned long flags, const void *caller)
> -{
> - vm->flags = flags;
> - vm->addr = (void *)va->va_start;
> - vm->size = va->va_end - va->va_start;
> - vm->caller = caller;
> - va->vm = vm;
> -}
> -
> -static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
> - unsigned long flags, const void *caller)
> -{
> - struct vmap_node *vn = addr_to_node(va->va_start);
> -
> - spin_lock(&vn->busy.lock);
> - setup_vmalloc_vm_locked(vm, va, flags, caller);
> - spin_unlock(&vn->busy.lock);
> -}
> -
> static void clear_vm_uninitialized_flag(struct vm_struct *vm)
> {
> /*
> @@ -3002,7 +2998,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
> if (!(flags & VM_NO_GUARD))
> size += PAGE_SIZE;
>
> - va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0);
> + va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area, flags, caller);
> if (IS_ERR(va)) {
> kfree(area);
> return NULL;
> @@ -4584,7 +4580,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
>
> spin_lock(&vn->busy.lock);
> insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head);
> - setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
> + setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
> pcpu_get_vm_areas);
> spin_unlock(&vn->busy.lock);
> }
>
> base-commit: c09a8e005eff6c064e2e9f11549966c36a724fbf
> --
> 2.43.0
>